| Commit Hash | Author | Date | Description | Body | Footers | Commit Message | Git Diff |
|---|---|---|---|---|---|---|---|
c1db76bf9ee1fac273e83b7ec266afe88ef9bf2e
|
Dom Dwyer
|
2022-12-19 11:21:53
|
remove max seqnum in PartitionResponse
|
Removes the redundant max_persisted_sequence_number in
PartitionResponse, which was functionally replaced with
completed_persistence_count for the Querier's parquet file discovery
instead.
| null |
refactor: remove max seqnum in PartitionResponse
Removes the redundant max_persisted_sequence_number in
PartitionResponse, which was functionally replaced with
completed_persistence_count for the Querier's parquet file discovery
instead.
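
For orientation, a minimal self-contained sketch of the narrowed response surface after this change (these are simplified stand-ins for the crate's types; the record-batch stream and the `new` constructor are omitted, and the names follow the diff below):

```rust
// Hypothetical simplification of the types touched by the diff below: after the
// change, a partition response carries only the partition id and the count of
// Parquet files persisted by this ingester instance.
#[derive(Debug, Clone, Copy)]
struct PartitionId(i64);

#[derive(Debug)]
struct PartitionResponse {
    id: PartitionId,
    /// What the Querier now uses for parquet file discovery instead of a
    /// max persisted sequence number.
    completed_persistence_count: u64,
}

impl PartitionResponse {
    fn new_no_batches(id: PartitionId, completed_persistence_count: u64) -> Self {
        Self { id, completed_persistence_count }
    }
}

fn main() {
    let resp = PartitionResponse::new_no_batches(PartitionId(42), 3);
    println!("{resp:?}");
}
```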
|
diff --git a/ingester2/src/buffer_tree/table.rs b/ingester2/src/buffer_tree/table.rs
index e109807710..d691e93e0c 100644
--- a/ingester2/src/buffer_tree/table.rs
+++ b/ingester2/src/buffer_tree/table.rs
@@ -294,9 +294,9 @@ where
let data = Box::pin(MemoryStream::new(
data.project_selection(selection).into_iter().collect(),
));
- PartitionResponse::new(data, id, None, completed_persistence_count)
+ PartitionResponse::new(data, id, completed_persistence_count)
}
- None => PartitionResponse::new_no_batches(id, None, completed_persistence_count),
+ None => PartitionResponse::new_no_batches(id, completed_persistence_count),
};
span.ok("read partition data");
diff --git a/ingester2/src/query/partition_response.rs b/ingester2/src/query/partition_response.rs
index 541cf8abcc..5221199666 100644
--- a/ingester2/src/query/partition_response.rs
+++ b/ingester2/src/query/partition_response.rs
@@ -2,7 +2,7 @@
//!
//! [`QueryResponse`]: super::response::QueryResponse
-use data_types::{PartitionId, SequenceNumber};
+use data_types::PartitionId;
use datafusion::physical_plan::SendableRecordBatchStream;
/// Response data for a single partition.
@@ -13,9 +13,6 @@ pub(crate) struct PartitionResponse {
/// Partition ID.
id: PartitionId,
- /// Max sequence number persisted
- max_persisted_sequence_number: Option<SequenceNumber>,
-
/// Count of persisted Parquet files for this partition by this ingester instance.
completed_persistence_count: u64,
}
@@ -31,7 +28,6 @@ impl std::fmt::Debug for PartitionResponse {
},
)
.field("partition_id", &self.id)
- .field("max_persisted", &self.max_persisted_sequence_number)
.field(
"completed_persistence_count",
&self.completed_persistence_count,
@@ -44,26 +40,19 @@ impl PartitionResponse {
pub(crate) fn new(
batches: SendableRecordBatchStream,
id: PartitionId,
- max_persisted_sequence_number: Option<SequenceNumber>,
completed_persistence_count: u64,
) -> Self {
Self {
batches: Some(batches),
id,
- max_persisted_sequence_number,
completed_persistence_count,
}
}
- pub(crate) fn new_no_batches(
- id: PartitionId,
- max_persisted_sequence_number: Option<SequenceNumber>,
- completed_persistence_count: u64,
- ) -> Self {
+ pub(crate) fn new_no_batches(id: PartitionId, completed_persistence_count: u64) -> Self {
Self {
batches: None,
id,
- max_persisted_sequence_number,
completed_persistence_count,
}
}
@@ -72,10 +61,6 @@ impl PartitionResponse {
self.id
}
- pub(crate) fn max_persisted_sequence_number(&self) -> Option<SequenceNumber> {
- self.max_persisted_sequence_number
- }
-
pub(crate) fn completed_persistence_count(&self) -> u64 {
self.completed_persistence_count
}
diff --git a/ingester2/src/server/grpc/query.rs b/ingester2/src/server/grpc/query.rs
index 3693a9ec06..af12138b72 100644
--- a/ingester2/src/server/grpc/query.rs
+++ b/ingester2/src/server/grpc/query.rs
@@ -311,13 +311,12 @@ impl From<QueryResponse> for FlatIngesterQueryResponseStream {
v.into_partition_stream()
.flat_map(|partition| {
let partition_id = partition.id();
- let max_seq = partition.max_persisted_sequence_number().map(|v| v.get());
let completed_persistence_count = partition.completed_persistence_count();
let head = futures::stream::once(async move {
Ok(FlatIngesterQueryResponse::StartPartition {
partition_id,
status: PartitionStatus {
- parquet_max_sequence_number: max_seq,
+ parquet_max_sequence_number: None,
},
completed_persistence_count,
})
|
76aa1ac3d64917fa8d0e306a50df7a40fdf9da8f
|
Stuart Carnie
|
2023-05-17 12:14:17
|
Refactor node used to represent a tag in a `GROUP BY`
|
* Use `VarRef` for `Tag`, to ensure a consistent representation of a
column reference across `GROUP BY`, `SELECT` projection and `WHERE`
clause.
* Rename `tags` to `tag_names`
* `tags` now returns `VarRef`
| null |
feat: Refactor node used to represent a tag in a `GROUP BY`
* Use `VarRef` for `Tag`, to ensure a consistent representation of a
column reference across `GROUP BY`, `SELECT` projection and `WHERE`
clause.
* Rename `tags` to `tag_names`
* `tags` now returns `VarRef`
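
For orientation, a self-contained sketch of the new shape and the caller migration from `tags()` to `tag_names()` (types heavily simplified; a plain string stands in for `VarRefDataType`, and the method bodies only approximate the diff below):

```rust
// Simplified stand-ins for the parser types changed in the diff below.
#[derive(Debug, PartialEq)]
struct Identifier(String);

#[derive(Debug, PartialEq)]
struct VarRef {
    name: Identifier,
    data_type: Option<String>, // stand-in for Option<VarRefDataType>
}

enum Dimension {
    VarRef(VarRef), // was Dimension::Tag(Identifier)
    Wildcard,
}

struct GroupByClause {
    contents: Vec<Dimension>,
}

impl GroupByClause {
    /// Full column references, now including any data type annotation.
    fn tags(&self) -> impl Iterator<Item = &VarRef> + '_ {
        self.contents.iter().filter_map(|d| match d {
            Dimension::VarRef(v) => Some(v),
            _ => None,
        })
    }

    /// Names only; callers that only need the identifier move here.
    fn tag_names(&self) -> impl Iterator<Item = &Identifier> + '_ {
        self.tags().map(|v| &v.name)
    }
}

fn main() {
    let gb = GroupByClause {
        contents: vec![
            Dimension::Wildcard,
            Dimension::VarRef(VarRef {
                name: Identifier("tag1".to_string()),
                data_type: None,
            }),
        ],
    };
    assert_eq!(gb.tag_names().count(), 1);
    assert_eq!(gb.tags().count(), 1);
}
```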
|
diff --git a/influxdb_influxql_parser/src/select.rs b/influxdb_influxql_parser/src/select.rs
index 747c8b26a4..f0568c3293 100644
--- a/influxdb_influxql_parser/src/select.rs
+++ b/influxdb_influxql_parser/src/select.rs
@@ -263,10 +263,18 @@ impl GroupByClause {
})
}
+ /// Returns an iterator of all the names of the tag dimensions for the `GROUP BY` clause.
+ pub fn tag_names(&self) -> impl Iterator<Item = &Identifier> + '_ {
+ self.contents.iter().filter_map(|dim| match dim {
+ Dimension::VarRef(i) => Some(&i.name),
+ _ => None,
+ })
+ }
+
/// Returns an iterator of all the tag dimensions for the `GROUP BY` clause.
- pub fn tags(&self) -> impl Iterator<Item = &Identifier> + '_ {
+ pub fn tags(&self) -> impl Iterator<Item = &VarRef> + '_ {
self.contents.iter().filter_map(|dim| match dim {
- Dimension::Tag(i) => Some(i),
+ Dimension::VarRef(i) => Some(i),
_ => None,
})
}
@@ -354,7 +362,7 @@ pub enum Dimension {
Time(TimeDimension),
/// Represents a literal tag reference in a `GROUP BY` clause.
- Tag(Identifier),
+ VarRef(VarRef),
/// Represents a regular expression in a `GROUP BY` clause.
Regex(Regex),
@@ -367,7 +375,7 @@ impl Display for Dimension {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Self::Time(v) => Display::fmt(v, f),
- Self::Tag(v) => Display::fmt(v, f),
+ Self::VarRef(v) => Display::fmt(v, f),
Self::Regex(v) => Display::fmt(v, f),
Self::Wildcard => f.write_char('*'),
}
@@ -382,8 +390,8 @@ impl Parser for Dimension {
time_call_expression,
map(regex, Self::Regex),
map(var_ref, |v| {
- Self::Tag(match v {
- Expr::VarRef(VarRef { name, .. }) => name,
+ Self::VarRef(match v {
+ Expr::VarRef(var_ref) => var_ref,
// var_ref only returns Expr::VarRef
_ => unreachable!(),
})
@@ -1209,7 +1217,7 @@ mod test {
assert_matches!(got, Dimension::Time { .. });
let (_, got) = Dimension::parse("foo").unwrap();
- assert_matches!(got, Dimension::Tag(t) if t == "foo".into());
+ assert_matches!(got, Dimension::VarRef(VarRef { name, ..}) if name == "foo".into());
let (_, got) = Dimension::parse("/bar/").unwrap();
assert_matches!(got, Dimension::Regex(_));
@@ -1257,13 +1265,13 @@ mod test {
let (_, got) = group_by_clause("GROUP BY *, /foo/, TIME(5m), tag1, tag2").unwrap();
assert!(got.time_dimension().is_some());
assert_eq!(
- got.tags().cloned().collect::<Vec<_>>(),
+ got.tag_names().cloned().collect::<Vec<_>>(),
vec!["tag1".into(), "tag2".into()]
);
let (_, got) = group_by_clause("GROUP BY *, /foo/").unwrap();
assert!(got.time_dimension().is_none());
- assert_eq!(got.tags().count(), 0);
+ assert_eq!(got.tag_names().count(), 0);
}
#[test]
diff --git a/influxdb_influxql_parser/src/visit.rs b/influxdb_influxql_parser/src/visit.rs
index 809308c635..8b668dd701 100644
--- a/influxdb_influxql_parser/src/visit.rs
+++ b/influxdb_influxql_parser/src/visit.rs
@@ -1178,7 +1178,7 @@ impl Visitable for Dimension {
let visitor = match self {
Self::Time(v) => v.accept(visitor),
- Self::Tag(_) | Self::Regex(_) | Self::Wildcard => Ok(visitor),
+ Self::VarRef(_) | Self::Regex(_) | Self::Wildcard => Ok(visitor),
}?;
visitor.post_visit_select_dimension(self)
diff --git a/influxdb_influxql_parser/src/visit_mut.rs b/influxdb_influxql_parser/src/visit_mut.rs
index 88f8cf9b4f..bc2b6babfa 100644
--- a/influxdb_influxql_parser/src/visit_mut.rs
+++ b/influxdb_influxql_parser/src/visit_mut.rs
@@ -1128,7 +1128,7 @@ impl VisitableMut for Dimension {
match self {
Self::Time(v) => v.accept(visitor)?,
- Self::Tag(_) | Self::Regex(_) | Self::Wildcard => {}
+ Self::VarRef(_) | Self::Regex(_) | Self::Wildcard => {}
};
visitor.post_visit_select_dimension(self)
diff --git a/iox_query_influxql/src/plan/expr_type_evaluator.rs b/iox_query_influxql/src/plan/expr_type_evaluator.rs
index 189856ecec..7cf0a3fbba 100644
--- a/iox_query_influxql/src/plan/expr_type_evaluator.rs
+++ b/iox_query_influxql/src/plan/expr_type_evaluator.rs
@@ -10,6 +10,7 @@ use influxdb_influxql_parser::expression::{
use influxdb_influxql_parser::literal::Literal;
use influxdb_influxql_parser::select::Dimension;
use itertools::Itertools;
+use std::ops::Deref;
/// Evaluate the type of the specified expression.
///
@@ -155,7 +156,7 @@ impl<'a> TypeEvaluator<'a> {
if data_type.is_none() {
if let Some(group_by) = &select.group_by {
if group_by.iter().any(|dim| {
- matches!(dim, Dimension::Tag(ident) if ident.as_str() == expr.name.as_str())
+ matches!(dim, Dimension::VarRef(VarRef { name, ..}) if name.deref().as_str() == expr.name.as_str())
}) {
data_type = Some(VarRefDataType::Tag);
}
diff --git a/iox_query_influxql/src/plan/planner.rs b/iox_query_influxql/src/plan/planner.rs
index 6d4df48f52..13e452167d 100644
--- a/iox_query_influxql/src/plan/planner.rs
+++ b/iox_query_influxql/src/plan/planner.rs
@@ -193,7 +193,7 @@ impl<'a> Context<'a> {
(true, None) => vec![],
(false, None) => self.root_group_by_tags.to_vec(),
(_, Some(group_by)) => group_by
- .tags()
+ .tag_names()
.map(|ident| ident.deref().as_str())
.chain(self.root_group_by_tags.iter().copied())
.sorted()
@@ -312,7 +312,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
let group_by_tags = if let Some(group_by) = select.group_by.as_ref() {
group_by
- .tags()
+ .tag_names()
.map(|ident| ident.deref().as_str())
.collect()
} else {
diff --git a/iox_query_influxql/src/plan/rewriter.rs b/iox_query_influxql/src/plan/rewriter.rs
index 45620c755d..5cd0d9b653 100644
--- a/iox_query_influxql/src/plan/rewriter.rs
+++ b/iox_query_influxql/src/plan/rewriter.rs
@@ -218,7 +218,7 @@ impl RewriteSelect {
if let Some(group_by) = &stmt.group_by {
// Remove any explicitly listed tags in the GROUP BY clause, so they are not
// expanded by any wildcards specified in the SELECT projection list
- group_by.tags().for_each(|ident| {
+ group_by.tag_names().for_each(|ident| {
tag_set.remove(ident.as_str());
});
}
@@ -259,7 +259,10 @@ impl RewriteSelect {
for dim in group_by.iter() {
let add_dim = |dim: &String| {
- new_dimensions.push(Dimension::Tag(Identifier::new(dim.clone())))
+ new_dimensions.push(Dimension::VarRef(VarRef {
+ name: Identifier::new(dim.clone()),
+ data_type: Some(VarRefDataType::Tag),
+ }))
};
match dim {
@@ -497,7 +500,7 @@ fn from_field_and_dimensions(
if let Some(group_by) = &select.group_by {
// Merge the dimensions from the subquery
- ts.extend(group_by.tags().map(|i| i.deref().to_string()));
+ ts.extend(group_by.tag_names().map(|i| i.deref().to_string()));
}
}
}
|
db8c8d5cc46a451dbc0f2b04294db4902f383897
|
Jure Bajic
|
2024-04-29 19:08:51
|
Add `with_params_from` method to clients query request builder (#24927)
|
Closes #24812
| null |
feat: Add `with_params_from` method to clients query request builder (#24927)
Closes #24812
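
For context, a usage sketch along the lines of the doc example added in the diff below; the URL, database name, and query are placeholders, and `tokio` plus `serde_json` are assumed as dependencies:

```rust
use influxdb3_client::Client;
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let client = Client::new("http://localhost:8181")?;
    let _response_bytes = client
        .api_v3_query_sql("db_name", "SELECT * FROM foo WHERE bar = $bar AND foo > $foo")
        // Bind several parameters at once instead of chaining `with_try_param` calls.
        .with_params_from([("bar", json!(false)), ("foo", json!(10))])?
        .send()
        .await?;
    Ok(())
}
```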
|
diff --git a/Cargo.lock b/Cargo.lock
index deb4e26db7..f33155131f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4861,9 +4861,9 @@ dependencies = [
[[package]]
name = "rustls-pki-types"
-version = "1.4.1"
+version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247"
+checksum = "beb461507cee2c2ff151784c52762cf4d9ff6a61f3e80968600ed24fa837fa54"
[[package]]
name = "rustls-webpki"
@@ -6641,9 +6641,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
-version = "0.1.7"
+version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "134306a13c5647ad6453e8deaec55d3a44d6021970129e6188735e74bf546697"
+checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b"
dependencies = [
"windows-sys 0.52.0",
]
diff --git a/influxdb3_client/src/lib.rs b/influxdb3_client/src/lib.rs
index d35a7a7129..eb10ceb905 100644
--- a/influxdb3_client/src/lib.rs
+++ b/influxdb3_client/src/lib.rs
@@ -388,6 +388,50 @@ impl<'c> QueryRequestBuilder<'c> {
self
}
+ /// Set a query parameters from the given collection of pairs
+ ///
+ /// # Example
+ /// ```no_run
+ /// # use influxdb3_client::Client;
+ /// # #[tokio::main]
+ /// # async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
+ /// use serde_json::json;
+ /// use std::collections::HashMap;
+ ///
+ /// let client = Client::new("http://localhost:8181")?;
+ /// let response_bytes = client
+ /// .api_v3_query_sql("db_name", "SELECT * FROM foo WHERE bar = $bar AND foo > $fooz")
+ /// .with_params_from([
+ /// ("bar", json!(false)),
+ /// ("foo", json!(10)),
+ /// ])?
+ /// .send()
+ /// .await?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn with_params_from<S, P, C>(mut self, params: C) -> Result<Self>
+ where
+ S: Into<String>,
+ P: TryInto<StatementParam, Error = iox_query_params::Error>,
+ C: IntoIterator<Item = (S, P)>,
+ {
+ for (name, param) in params.into_iter() {
+ let name = name.into();
+ let param = param
+ .try_into()
+ .map_err(|source| Error::ConvertQueryParam {
+ name: name.clone(),
+ source,
+ })?;
+
+ self.params
+ .get_or_insert_with(Default::default)
+ .insert(name, param);
+ }
+ Ok(self)
+ }
+
/// Try to set a query parameter value with the given `name`
///
/// # Example
@@ -408,7 +452,7 @@ impl<'c> QueryRequestBuilder<'c> {
/// ```
pub fn with_try_param<S, P>(mut self, name: S, param: P) -> Result<Self>
where
- S: Into<String> + Clone,
+ S: Into<String>,
P: TryInto<StatementParam, Error = iox_query_params::Error>,
{
let name = name.into();
@@ -695,6 +739,49 @@ mod tests {
mock.assert_async().await;
+ r.expect("sent request successfully");
+ }
+ #[tokio::test]
+ async fn api_v3_query_influxql_with_params_from() {
+ let db = "stats";
+ let query = "SELECT * FROM foo WHERE a = $a AND b < $b AND c > $c AND d = $d";
+ let body = r#"[{"host": "foo", "time": "1990-07-23T06:00:00:000", "val": 1}]"#;
+
+ let mut mock_server = Server::new_async().await;
+ let mock = mock_server
+ .mock("POST", "/api/v3/query_influxql")
+ .match_body(Matcher::Json(serde_json::json!({
+ "db": db,
+ "q": query,
+ "params": {
+ "a": "bar",
+ "b": 123,
+ "c": 1.5,
+ "d": false
+ },
+ "format": null
+ })))
+ .with_status(200)
+ .with_body(body)
+ .create_async()
+ .await;
+
+ let client = Client::new(mock_server.url()).expect("create client");
+
+ let r = client
+ .api_v3_query_influxql(db, query)
+ .with_params_from([
+ ("a", json!("bar")),
+ ("b", json!(123)),
+ ("c", json!(1.5)),
+ ("d", json!(false)),
+ ])
+ .unwrap()
+ .send()
+ .await;
+
+ mock.assert_async().await;
+
r.expect("sent request successfully");
}
}
|
8ab553180e87eb47f3cb6e394f6c79ce104231b3
|
Andrew Lamb
|
2023-03-21 19:25:04
|
Update datafusion again (#7276)
|
* chore: Update DataFusion pin
* chore: Update for new Expr type
* fix: deprecated
* chore: Run cargo hakari tasks
* docs: Update predicate/src/lib.rs
* fix: fmt
---------
|
Co-authored-by: CircleCI[bot] <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
chore: Update datafusion again (#7276)
* chore: Update DataFusion pin
* chore: Update for new Expr type
* fix: deprecated
* chore: Run cargo hakari tasks
* docs: Update predicate/src/lib.rs
* fix: fmt
---------
Co-authored-by: CircleCI[bot] <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/Cargo.lock b/Cargo.lock
index 6cec08fcb6..13ee13989c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -112,16 +112,16 @@ checksum = "f410d3907b6b3647b9e7bca4551274b2e3d716aa940afb67b7287257401da921"
dependencies = [
"ahash 0.8.3",
"arrow-arith",
- "arrow-array",
- "arrow-buffer",
+ "arrow-array 34.0.0",
+ "arrow-buffer 34.0.0",
"arrow-cast",
"arrow-csv",
- "arrow-data",
+ "arrow-data 34.0.0",
"arrow-ipc",
"arrow-json",
"arrow-ord",
"arrow-row",
- "arrow-schema",
+ "arrow-schema 34.0.0",
"arrow-select",
"arrow-string",
"comfy-table",
@@ -133,10 +133,10 @@ version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f87391cf46473c9bc53dab68cb8872c3a81d4dfd1703f1c8aa397dba9880a043"
dependencies = [
- "arrow-array",
- "arrow-buffer",
- "arrow-data",
- "arrow-schema",
+ "arrow-array 34.0.0",
+ "arrow-buffer 34.0.0",
+ "arrow-data 34.0.0",
+ "arrow-schema 34.0.0",
"chrono",
"half 2.2.1",
"num",
@@ -149,15 +149,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d35d5475e65c57cffba06d0022e3006b677515f99b54af33a7cd54f6cdd4a5b5"
dependencies = [
"ahash 0.8.3",
- "arrow-buffer",
- "arrow-data",
- "arrow-schema",
+ "arrow-buffer 34.0.0",
+ "arrow-data 34.0.0",
+ "arrow-schema 34.0.0",
"chrono",
"half 2.2.1",
"hashbrown 0.13.2",
"num",
]
+[[package]]
+name = "arrow-array"
+version = "35.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "43489bbff475545b78b0e20bde1d22abd6c99e54499839f9e815a2fa5134a51b"
+dependencies = [
+ "ahash 0.8.3",
+ "arrow-buffer 35.0.0",
+ "arrow-data 35.0.0",
+ "arrow-schema 35.0.0",
+ "chrono",
+ "chrono-tz",
+ "half 2.2.1",
+ "hashbrown 0.13.2",
+ "num",
+]
+
[[package]]
name = "arrow-buffer"
version = "34.0.0"
@@ -168,16 +185,26 @@ dependencies = [
"num",
]
+[[package]]
+name = "arrow-buffer"
+version = "35.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a3759e4a52c593281184787af5435671dc8b1e78333e5a30242b2e2d6e3c9d1f"
+dependencies = [
+ "half 2.2.1",
+ "num",
+]
+
[[package]]
name = "arrow-cast"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a7285272c9897321dfdba59de29f5b05aeafd3cdedf104a941256d155f6d304"
dependencies = [
- "arrow-array",
- "arrow-buffer",
- "arrow-data",
- "arrow-schema",
+ "arrow-array 34.0.0",
+ "arrow-buffer 34.0.0",
+ "arrow-data 34.0.0",
+ "arrow-schema 34.0.0",
"arrow-select",
"chrono",
"lexical-core",
@@ -190,11 +217,11 @@ version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "981ee4e7f6a120da04e00d0b39182e1eeacccb59c8da74511de753c56b7fddf7"
dependencies = [
- "arrow-array",
- "arrow-buffer",
+ "arrow-array 34.0.0",
+ "arrow-buffer 34.0.0",
"arrow-cast",
- "arrow-data",
- "arrow-schema",
+ "arrow-data 34.0.0",
+ "arrow-schema 34.0.0",
"chrono",
"csv",
"csv-core",
@@ -209,8 +236,20 @@ version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "27cc673ee6989ea6e4b4e8c7d461f7e06026a096c8f0b1a7288885ff71ae1e56"
dependencies = [
- "arrow-buffer",
- "arrow-schema",
+ "arrow-buffer 34.0.0",
+ "arrow-schema 34.0.0",
+ "half 2.2.1",
+ "num",
+]
+
+[[package]]
+name = "arrow-data"
+version = "35.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "19c7787c6cdbf9539b1ffb860bfc18c5848926ec3d62cbd52dc3b1ea35c874fd"
+dependencies = [
+ "arrow-buffer 35.0.0",
+ "arrow-schema 35.0.0",
"half 2.2.1",
"num",
]
@@ -221,11 +260,11 @@ version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd16945f8f3be0f6170b8ced60d414e56239d91a16a3f8800bc1504bc58b2592"
dependencies = [
- "arrow-array",
- "arrow-buffer",
+ "arrow-array 34.0.0",
+ "arrow-buffer 34.0.0",
"arrow-cast",
"arrow-ipc",
- "arrow-schema",
+ "arrow-schema 34.0.0",
"base64 0.21.0",
"bytes",
"futures",
@@ -244,11 +283,11 @@ version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e37b8b69d9e59116b6b538e8514e0ec63a30f08b617ce800d31cb44e3ef64c1a"
dependencies = [
- "arrow-array",
- "arrow-buffer",
+ "arrow-array 34.0.0",
+ "arrow-buffer 34.0.0",
"arrow-cast",
- "arrow-data",
- "arrow-schema",
+ "arrow-data 34.0.0",
+ "arrow-schema 34.0.0",
"flatbuffers",
]
@@ -258,11 +297,11 @@ version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "80c3fa0bed7cfebf6d18e46b733f9cb8a1cb43ce8e6539055ca3e1e48a426266"
dependencies = [
- "arrow-array",
- "arrow-buffer",
+ "arrow-array 34.0.0",
+ "arrow-buffer 34.0.0",
"arrow-cast",
- "arrow-data",
- "arrow-schema",
+ "arrow-data 34.0.0",
+ "arrow-schema 34.0.0",
"chrono",
"half 2.2.1",
"indexmap",
@@ -277,10 +316,10 @@ version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d247dce7bed6a8d6a3c6debfa707a3a2f694383f0c692a39d736a593eae5ef94"
dependencies = [
- "arrow-array",
- "arrow-buffer",
- "arrow-data",
- "arrow-schema",
+ "arrow-array 34.0.0",
+ "arrow-buffer 34.0.0",
+ "arrow-data 34.0.0",
+ "arrow-schema 34.0.0",
"arrow-select",
"num",
]
@@ -292,10 +331,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d609c0181f963cea5c70fddf9a388595b5be441f3aa1d1cdbf728ca834bbd3a"
dependencies = [
"ahash 0.8.3",
- "arrow-array",
- "arrow-buffer",
- "arrow-data",
- "arrow-schema",
+ "arrow-array 34.0.0",
+ "arrow-buffer 34.0.0",
+ "arrow-data 34.0.0",
+ "arrow-schema 34.0.0",
"half 2.2.1",
"hashbrown 0.13.2",
]
@@ -306,16 +345,22 @@ version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "64951898473bfb8e22293e83a44f02874d2257514d49cd95f9aa4afcff183fbc"
+[[package]]
+name = "arrow-schema"
+version = "35.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf6b26f6a6f8410e3b9531cbd1886399b99842701da77d4b4cf2013f7708f20f"
+
[[package]]
name = "arrow-select"
version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a513d89c2e1ac22b28380900036cf1f3992c6443efc5e079de631dcf83c6888"
dependencies = [
- "arrow-array",
- "arrow-buffer",
- "arrow-data",
- "arrow-schema",
+ "arrow-array 34.0.0",
+ "arrow-buffer 34.0.0",
+ "arrow-data 34.0.0",
+ "arrow-schema 34.0.0",
"num",
]
@@ -325,10 +370,10 @@ version = "34.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5288979b2705dae1114c864d73150629add9153b9b8f1d7ee3963db94c372ba5"
dependencies = [
- "arrow-array",
- "arrow-buffer",
- "arrow-data",
- "arrow-schema",
+ "arrow-array 34.0.0",
+ "arrow-buffer 34.0.0",
+ "arrow-data 34.0.0",
+ "arrow-schema 34.0.0",
"arrow-select",
"regex",
"regex-syntax",
@@ -1451,7 +1496,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=4afd67a0e496e1834ad6184629f28e60f66b2777#4afd67a0e496e1834ad6184629f28e60f66b2777"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=26e1b20ea3362ea62cb713004a0636b8af6a16d7#26e1b20ea3362ea62cb713004a0636b8af6a16d7"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -1498,9 +1543,10 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=4afd67a0e496e1834ad6184629f28e60f66b2777#4afd67a0e496e1834ad6184629f28e60f66b2777"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=26e1b20ea3362ea62cb713004a0636b8af6a16d7#26e1b20ea3362ea62cb713004a0636b8af6a16d7"
dependencies = [
"arrow",
+ "arrow-array 35.0.0",
"chrono",
"num_cpus",
"object_store",
@@ -1511,7 +1557,7 @@ dependencies = [
[[package]]
name = "datafusion-execution"
version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=4afd67a0e496e1834ad6184629f28e60f66b2777#4afd67a0e496e1834ad6184629f28e60f66b2777"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=26e1b20ea3362ea62cb713004a0636b8af6a16d7#26e1b20ea3362ea62cb713004a0636b8af6a16d7"
dependencies = [
"dashmap",
"datafusion-common",
@@ -1528,7 +1574,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=4afd67a0e496e1834ad6184629f28e60f66b2777#4afd67a0e496e1834ad6184629f28e60f66b2777"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=26e1b20ea3362ea62cb713004a0636b8af6a16d7#26e1b20ea3362ea62cb713004a0636b8af6a16d7"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -1539,7 +1585,7 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=4afd67a0e496e1834ad6184629f28e60f66b2777#4afd67a0e496e1834ad6184629f28e60f66b2777"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=26e1b20ea3362ea62cb713004a0636b8af6a16d7#26e1b20ea3362ea62cb713004a0636b8af6a16d7"
dependencies = [
"arrow",
"async-trait",
@@ -1556,12 +1602,12 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=4afd67a0e496e1834ad6184629f28e60f66b2777#4afd67a0e496e1834ad6184629f28e60f66b2777"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=26e1b20ea3362ea62cb713004a0636b8af6a16d7#26e1b20ea3362ea62cb713004a0636b8af6a16d7"
dependencies = [
"ahash 0.8.3",
"arrow",
- "arrow-buffer",
- "arrow-schema",
+ "arrow-buffer 34.0.0",
+ "arrow-schema 34.0.0",
"blake2",
"blake3",
"chrono",
@@ -1586,7 +1632,7 @@ dependencies = [
[[package]]
name = "datafusion-proto"
version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=4afd67a0e496e1834ad6184629f28e60f66b2777#4afd67a0e496e1834ad6184629f28e60f66b2777"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=26e1b20ea3362ea62cb713004a0636b8af6a16d7#26e1b20ea3362ea62cb713004a0636b8af6a16d7"
dependencies = [
"arrow",
"chrono",
@@ -1602,7 +1648,7 @@ dependencies = [
[[package]]
name = "datafusion-row"
version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=4afd67a0e496e1834ad6184629f28e60f66b2777#4afd67a0e496e1834ad6184629f28e60f66b2777"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=26e1b20ea3362ea62cb713004a0636b8af6a16d7#26e1b20ea3362ea62cb713004a0636b8af6a16d7"
dependencies = [
"arrow",
"datafusion-common",
@@ -1613,9 +1659,9 @@ dependencies = [
[[package]]
name = "datafusion-sql"
version = "20.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=4afd67a0e496e1834ad6184629f28e60f66b2777#4afd67a0e496e1834ad6184629f28e60f66b2777"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=26e1b20ea3362ea62cb713004a0636b8af6a16d7#26e1b20ea3362ea62cb713004a0636b8af6a16d7"
dependencies = [
- "arrow-schema",
+ "arrow-schema 34.0.0",
"datafusion-common",
"datafusion-expr",
"log",
@@ -4083,12 +4129,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ac135ecf63ebb5f53dda0921b0b76d6048b3ef631a5f4760b9e8f863ff00cfa"
dependencies = [
"ahash 0.8.3",
- "arrow-array",
- "arrow-buffer",
+ "arrow-array 34.0.0",
+ "arrow-buffer 34.0.0",
"arrow-cast",
- "arrow-data",
+ "arrow-data 34.0.0",
"arrow-ipc",
- "arrow-schema",
+ "arrow-schema 34.0.0",
"arrow-select",
"base64 0.21.0",
"brotli",
diff --git a/Cargo.toml b/Cargo.toml
index 0545e12ff1..feda8d34d9 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -120,8 +120,8 @@ license = "MIT OR Apache-2.0"
[workspace.dependencies]
arrow = { version = "34.0.0" }
arrow-flight = { version = "34.0.0" }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="4afd67a0e496e1834ad6184629f28e60f66b2777", default-features = false }
-datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="4afd67a0e496e1834ad6184629f28e60f66b2777" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="26e1b20ea3362ea62cb713004a0636b8af6a16d7", default-features = false }
+datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="26e1b20ea3362ea62cb713004a0636b8af6a16d7" }
hashbrown = { version = "0.13.2" }
parquet = { version = "34.0.0" }
diff --git a/iox_query_influxql/src/frontend/planner.rs b/iox_query_influxql/src/frontend/planner.rs
index b95fc64a30..a586a29011 100644
--- a/iox_query_influxql/src/frontend/planner.rs
+++ b/iox_query_influxql/src/frontend/planner.rs
@@ -150,7 +150,7 @@ impl InfluxQLQueryPlanner {
use std::collections::hash_map::Entry;
let session_cfg = ctx.inner().copied_config();
- let cfg = session_cfg.config_options();
+ let cfg = session_cfg.options();
let schema = ctx
.inner()
.catalog(&cfg.catalog.default_catalog)
diff --git a/predicate/src/lib.rs b/predicate/src/lib.rs
index fc6508bc2f..824a9b8660 100644
--- a/predicate/src/lib.rs
+++ b/predicate/src/lib.rs
@@ -600,7 +600,18 @@ impl From<ValueExpr> for Expr {
}
}
-/// Recursively walk an expression tree, checking if the expression is row-based.
+/// Recursively walk an expression tree, checking if the expression is
+/// row-based.
+///
+/// A row-based function takes one row in and produces
+/// one value as output.
+///
+/// Note that even though a predicate expression like `col < 5` can be used to
+/// filter rows, the expression itself is row-based (produces a single boolean).
+///
+/// Examples of non row based expressions are Aggregate and
+/// Window function which produce different cardinality than their
+/// input.
struct RowBasedVisitor {
row_based: bool,
}
@@ -637,6 +648,7 @@ impl ExpressionVisitor for RowBasedVisitor {
| Expr::Literal(_)
| Expr::Negative(_)
| Expr::Not(_)
+ | Expr::OuterReferenceColumn(_, _)
| Expr::Placeholder { .. }
| Expr::QualifiedWildcard { .. }
| Expr::ScalarFunction { .. }
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index efc9719d21..55e6fd3222 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -29,9 +29,9 @@ bytes = { version = "1" }
chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "serde"] }
crossbeam-utils = { version = "0.8" }
crypto-common = { version = "0.1", default-features = false, features = ["std"] }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4afd67a0e496e1834ad6184629f28e60f66b2777" }
-datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4afd67a0e496e1834ad6184629f28e60f66b2777", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
-datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4afd67a0e496e1834ad6184629f28e60f66b2777", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e1b20ea3362ea62cb713004a0636b8af6a16d7" }
+datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e1b20ea3362ea62cb713004a0636b8af6a16d7", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
+datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e1b20ea3362ea62cb713004a0636b8af6a16d7", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
digest = { version = "0.10", features = ["mac", "std"] }
either = { version = "1" }
fixedbitset = { version = "0.4" }
|
fbbaf155ec6a7caa41ce872bbb908898f4a47377
|
Marco Neumann
|
2023-07-19 11:20:40
|
ability to batch cache loader requests (#8259)
|
Core functionality for #8089.
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
feat: ability to batch cache loader requests (#8259)
Core functionality for #8089.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
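
For orientation, a self-contained sketch of the batching idea (not the crate's `BatchLoader`/`BatchLoaderFlusher` API; keys, values, and the flush trigger are simplified): pending `load` calls park on oneshot channels until a flush services the whole batch with a single vectorized call.

```rust
use std::{sync::Mutex, time::Duration};
use tokio::sync::oneshot;

#[derive(Default)]
struct BatchLoader {
    // Keys waiting to be loaded, each with a channel to hand the value back.
    pending: Mutex<Vec<(u8, oneshot::Sender<String>)>>,
}

impl BatchLoader {
    /// Enqueue a key; the returned future resolves only once `flush` runs.
    async fn load(&self, k: u8) -> String {
        let (tx, rx) = oneshot::channel();
        self.pending.lock().unwrap().push((k, tx));
        rx.await.expect("flush dropped the sender")
    }

    /// Service every pending request with one batched "inner" load.
    fn flush(&self) {
        let pending = std::mem::take(&mut *self.pending.lock().unwrap());
        for (k, tx) in pending {
            let _ = tx.send(format!("value-for-{k}"));
        }
    }
}

#[tokio::main]
async fn main() {
    let loader = BatchLoader::default();

    // Both loads park until the flush below services the batch in one go.
    let loads = async { tokio::join!(loader.load(1), loader.load(2)) };
    let flush = async {
        tokio::time::sleep(Duration::from_millis(50)).await;
        loader.flush();
    };

    let ((v1, v2), _) = tokio::join!(loads, flush);
    assert_eq!((v1.as_str(), v2.as_str()), ("value-for-1", "value-for-2"));
}
```

In the crate itself, `BatchLoaderFlusherExt::auto_flush` (see the diff below) automates this: it polls the batched futures and flushes whenever any of them is still pending.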
|
diff --git a/cache_system/src/loader/batch.rs b/cache_system/src/loader/batch.rs
new file mode 100644
index 0000000000..2ee0fbb2d2
--- /dev/null
+++ b/cache_system/src/loader/batch.rs
@@ -0,0 +1,485 @@
+//! Batching of loader request.
+use std::{
+ collections::HashMap,
+ fmt::Debug,
+ future::Future,
+ hash::Hash,
+ sync::{
+ atomic::{AtomicU64, Ordering},
+ Arc,
+ },
+ task::Poll,
+};
+
+use async_trait::async_trait;
+use futures::FutureExt;
+use observability_deps::tracing::trace;
+use parking_lot::Mutex;
+use tokio::sync::oneshot::{channel, Sender};
+
+use crate::cancellation_safe_future::{CancellationSafeFuture, CancellationSafeFutureReceiver};
+
+use super::Loader;
+
+/// Batch [load](Loader::load) requests.
+///
+/// Requests against this loader will be [pending](std::task::Poll::Pending) until [flush](BatchLoaderFlusher::flush) is
+/// called. To simplify the usage -- esp. in combination with [`Cache::get`] -- use [`BatchLoaderFlusherExt`].
+///
+///
+/// [`Cache::get`]: crate::cache::Cache::get
+#[derive(Debug)]
+pub struct BatchLoader<K, Extra, V, L>
+where
+ K: Debug + Hash + Send + 'static,
+ Extra: Debug + Send + 'static,
+ V: Debug + Send + 'static,
+ L: Loader<K = Vec<K>, Extra = Vec<Extra>, V = Vec<V>>,
+{
+ inner: Arc<BatchLoaderInner<K, Extra, V, L>>,
+}
+
+impl<K, Extra, V, L> BatchLoader<K, Extra, V, L>
+where
+ K: Debug + Hash + Send + 'static,
+ Extra: Debug + Send + 'static,
+ V: Debug + Send + 'static,
+ L: Loader<K = Vec<K>, Extra = Vec<Extra>, V = Vec<V>>,
+{
+ /// Create new batch loader based on a non-batched, vector-based one.
+ pub fn new(inner: L) -> Self {
+ Self {
+ inner: Arc::new(BatchLoaderInner {
+ inner,
+ pending: Default::default(),
+ job_id_counter: Default::default(),
+ job_handles: Default::default(),
+ }),
+ }
+ }
+}
+
+/// State of [`BatchLoader`].
+///
+/// This is an extra struct so it can be wrapped into an [`Arc`] and shared with the futures that are spawned into
+/// [`CancellationSafeFuture`]
+#[derive(Debug)]
+struct BatchLoaderInner<K, Extra, V, L>
+where
+ K: Debug + Hash + Send + 'static,
+ Extra: Debug + Send + 'static,
+ V: Debug + Send + 'static,
+ L: Loader<K = Vec<K>, Extra = Vec<Extra>, V = Vec<V>>,
+{
+ inner: L,
+ pending: Mutex<Vec<(K, Extra, Sender<V>)>>,
+ job_id_counter: AtomicU64,
+ job_handles: Mutex<HashMap<u64, CancellationSafeFutureReceiver<()>>>,
+}
+
+/// Flush interface for [`BatchLoader`].
+///
+/// This is a trait so you can [type-erase](https://en.wikipedia.org/wiki/Type_erasure) it by putting it into an
+/// [`Arc`],
+///
+/// This trait is object-safe.
+#[async_trait]
+pub trait BatchLoaderFlusher: Debug + Send + Sync + 'static {
+ /// Flush all batched requests.
+ async fn flush(&self);
+}
+
+#[async_trait]
+impl BatchLoaderFlusher for Arc<dyn BatchLoaderFlusher> {
+ async fn flush(&self) {
+ self.as_ref().flush().await;
+ }
+}
+
+#[async_trait]
+impl<K, Extra, V, L> BatchLoaderFlusher for BatchLoader<K, Extra, V, L>
+where
+ K: Debug + Hash + Send + 'static,
+ Extra: Debug + Send + 'static,
+ V: Debug + Send + 'static,
+ L: Loader<K = Vec<K>, Extra = Vec<Extra>, V = Vec<V>>,
+{
+ async fn flush(&self) {
+ trace!("flushing batch loader");
+
+ let pending: Vec<_> = {
+ let mut pending = self.inner.pending.lock();
+ std::mem::take(pending.as_mut())
+ };
+
+ if pending.is_empty() {
+ return;
+ }
+ let job_id = self.inner.job_id_counter.fetch_add(1, Ordering::SeqCst);
+ let handle_recv = CancellationSafeFutureReceiver::default();
+
+ {
+ let mut job_handles = self.inner.job_handles.lock();
+ job_handles.insert(job_id, handle_recv.clone());
+ }
+
+ let inner = Arc::clone(&self.inner);
+ let fut = CancellationSafeFuture::new(
+ async move {
+ let mut keys = Vec::with_capacity(pending.len());
+ let mut extras = Vec::with_capacity(pending.len());
+ let mut senders = Vec::with_capacity(pending.len());
+
+ for (k, extra, sender) in pending {
+ keys.push(k);
+ extras.push(extra);
+ senders.push(sender);
+ }
+
+ let values = inner.inner.load(keys, extras).await;
+ assert_eq!(values.len(), senders.len());
+
+ for (value, sender) in values.into_iter().zip(senders) {
+ sender.send(value).unwrap();
+ }
+
+ let mut job_handles = inner.job_handles.lock();
+ job_handles.remove(&job_id);
+ },
+ handle_recv,
+ );
+ fut.await;
+ }
+}
+
+#[async_trait]
+impl<K, Extra, V, L> Loader for BatchLoader<K, Extra, V, L>
+where
+ K: Debug + Hash + Send + 'static,
+ Extra: Debug + Send + 'static,
+ V: Debug + Send + 'static,
+ L: Loader<K = Vec<K>, Extra = Vec<Extra>, V = Vec<V>>,
+{
+ type K = K;
+ type Extra = Extra;
+ type V = V;
+
+ async fn load(&self, k: Self::K, extra: Self::Extra) -> Self::V {
+ let (tx, rx) = channel();
+
+ {
+ let mut pending = self.inner.pending.lock();
+ pending.push((k, extra, tx));
+ }
+
+ rx.await.unwrap()
+ }
+}
+
+/// Extension trait for [`BatchLoaderFlusher`] because the methods on this extension trait are not object safe.
+#[async_trait]
+pub trait BatchLoaderFlusherExt {
+ /// Try to poll all given futures and automatically [flush](BatchLoaderFlusher) if any of them end up in a pending state.
+ ///
+ /// This guarantees that the order of the results is identical to the order of the futures.
+ async fn auto_flush<F>(&self, futures: Vec<F>) -> Vec<F::Output>
+ where
+ F: Future + Send,
+ F::Output: Send;
+}
+
+#[async_trait]
+impl<B> BatchLoaderFlusherExt for B
+where
+ B: BatchLoaderFlusher,
+{
+ async fn auto_flush<F>(&self, futures: Vec<F>) -> Vec<F::Output>
+ where
+ F: Future + Send,
+ F::Output: Send,
+ {
+ let mut futures = futures
+ .into_iter()
+ .map(|f| f.boxed())
+ .enumerate()
+ .collect::<Vec<_>>();
+ let mut output: Vec<Option<F::Output>> = (0..futures.len()).map(|_| None).collect();
+
+ while !futures.is_empty() {
+ let mut pending = Vec::with_capacity(futures.len());
+
+ for (idx, mut f) in futures.into_iter() {
+ match futures::poll!(&mut f) {
+ Poll::Ready(res) => {
+ output[idx] = Some(res);
+ }
+ Poll::Pending => {
+ pending.push((idx, f));
+ }
+ }
+ }
+
+ if !pending.is_empty() {
+ self.flush().await;
+ }
+
+ futures = pending;
+ }
+
+ output
+ .into_iter()
+ .map(|o| o.expect("all futures finished"))
+ .collect()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use tokio::sync::Barrier;
+
+ use crate::{
+ cache::{driver::CacheDriver, Cache},
+ loader::test_util::TestLoader,
+ test_util::EnsurePendingExt,
+ };
+
+ use super::*;
+
+ type TestLoaderT = Arc<TestLoader<Vec<u8>, Vec<bool>, Vec<String>>>;
+
+ #[tokio::test]
+ async fn test_flush_empty() {
+ let (inner, batch) = setup();
+ batch.flush().await;
+ assert_eq!(inner.loaded(), vec![],);
+ }
+
+ #[tokio::test]
+ async fn test_flush_manual() {
+ let (inner, batch) = setup();
+
+ let pending_barrier_1 = Arc::new(Barrier::new(2));
+ let pending_barrier_1_captured = Arc::clone(&pending_barrier_1);
+ let batch_captured = Arc::clone(&batch);
+ let handle_1 = tokio::spawn(async move {
+ batch_captured
+ .load(1, true)
+ .ensure_pending(pending_barrier_1_captured)
+ .await
+ });
+ pending_barrier_1.wait().await;
+
+ let pending_barrier_2 = Arc::new(Barrier::new(2));
+ let pending_barrier_2_captured = Arc::clone(&pending_barrier_2);
+ let batch_captured = Arc::clone(&batch);
+ let handle_2 = tokio::spawn(async move {
+ batch_captured
+ .load(2, false)
+ .ensure_pending(pending_barrier_2_captured)
+ .await
+ });
+ pending_barrier_2.wait().await;
+
+ inner.mock_next(vec![1, 2], vec![String::from("foo"), String::from("bar")]);
+
+ batch.flush().await;
+ assert_eq!(inner.loaded(), vec![(vec![1, 2], vec![true, false])],);
+
+ assert_eq!(handle_1.await.unwrap(), String::from("foo"));
+ assert_eq!(handle_2.await.unwrap(), String::from("bar"));
+ }
+
+ /// Simulate the following scenario:
+ ///
+ /// 1. load `1`, flush it, inner load starts processing `[1]`
+ /// 2. load `2`, flush it, inner load starts processing `[2]`
+ /// 3. inner loader returns result for `[2]`, batch loader returns that result as well
+ /// 4. inner loader returns result for `[1]`, batch loader returns that result as well
+ #[tokio::test]
+ async fn test_concurrent_load() {
+ let (inner, batch) = setup();
+
+ let load_barrier_1 = inner.block_next(vec![1], vec![String::from("foo")]);
+ inner.mock_next(vec![2], vec![String::from("bar")]);
+
+ // set up first load
+ let pending_barrier_1 = Arc::new(Barrier::new(2));
+ let pending_barrier_1_captured = Arc::clone(&pending_barrier_1);
+ let batch_captured = Arc::clone(&batch);
+ let handle_1 = tokio::spawn(async move {
+ batch_captured
+ .load(1, true)
+ .ensure_pending(pending_barrier_1_captured)
+ .await
+ });
+ pending_barrier_1.wait().await;
+
+ // flush first load, this is blocked by the load barrier
+ let pending_barrier_2 = Arc::new(Barrier::new(2));
+ let pending_barrier_2_captured = Arc::clone(&pending_barrier_2);
+ let batch_captured = Arc::clone(&batch);
+ let handle_2 = tokio::spawn(async move {
+ batch_captured
+ .flush()
+ .ensure_pending(pending_barrier_2_captured)
+ .await;
+ });
+ pending_barrier_2.wait().await;
+
+ // set up second load
+ let pending_barrier_3 = Arc::new(Barrier::new(2));
+ let pending_barrier_3_captured = Arc::clone(&pending_barrier_3);
+ let batch_captured = Arc::clone(&batch);
+ let handle_3 = tokio::spawn(async move {
+ batch_captured
+ .load(2, false)
+ .ensure_pending(pending_barrier_3_captured)
+ .await
+ });
+ pending_barrier_3.wait().await;
+
+ // flush 2nd load and get result
+ batch.flush().await;
+ assert_eq!(handle_3.await.unwrap(), String::from("bar"));
+
+ // flush 1st load and get result
+ load_barrier_1.wait().await;
+ handle_2.await.unwrap();
+ assert_eq!(handle_1.await.unwrap(), String::from("foo"));
+
+ assert_eq!(
+ inner.loaded(),
+ vec![(vec![1], vec![true]), (vec![2], vec![false])],
+ );
+ }
+
+ #[tokio::test]
+ async fn test_cancel_flush() {
+ let (inner, batch) = setup();
+
+ let load_barrier_1 = inner.block_next(vec![1], vec![String::from("foo")]);
+
+ // set up load
+ let pending_barrier_1 = Arc::new(Barrier::new(2));
+ let pending_barrier_1_captured = Arc::clone(&pending_barrier_1);
+ let batch_captured = Arc::clone(&batch);
+ let handle_1 = tokio::spawn(async move {
+ batch_captured
+ .load(1, true)
+ .ensure_pending(pending_barrier_1_captured)
+ .await
+ });
+ pending_barrier_1.wait().await;
+
+ // flush load, this is blocked by the load barrier
+ let pending_barrier_2 = Arc::new(Barrier::new(2));
+ let pending_barrier_2_captured = Arc::clone(&pending_barrier_2);
+ let batch_captured = Arc::clone(&batch);
+ let handle_2 = tokio::spawn(async move {
+ batch_captured
+ .flush()
+ .ensure_pending(pending_barrier_2_captured)
+ .await;
+ });
+ pending_barrier_2.wait().await;
+
+ // abort flush
+ handle_2.abort();
+
+ // flush load and get result
+ load_barrier_1.wait().await;
+ assert_eq!(handle_1.await.unwrap(), String::from("foo"));
+
+ assert_eq!(inner.loaded(), vec![(vec![1], vec![true])],);
+ }
+
+ #[tokio::test]
+ async fn test_cancel_load_and_flush() {
+ let (inner, batch) = setup();
+
+ let load_barrier_1 = inner.block_next(vec![1], vec![String::from("foo")]);
+
+ // set up load
+ let pending_barrier_1 = Arc::new(Barrier::new(2));
+ let pending_barrier_1_captured = Arc::clone(&pending_barrier_1);
+ let batch_captured = Arc::clone(&batch);
+ let handle_1 = tokio::spawn(async move {
+ batch_captured
+ .load(1, true)
+ .ensure_pending(pending_barrier_1_captured)
+ .await
+ });
+ pending_barrier_1.wait().await;
+
+ // flush load, this is blocked by the load barrier
+ let pending_barrier_2 = Arc::new(Barrier::new(2));
+ let pending_barrier_2_captured = Arc::clone(&pending_barrier_2);
+ let batch_captured = Arc::clone(&batch);
+ let handle_2 = tokio::spawn(async move {
+ batch_captured
+ .flush()
+ .ensure_pending(pending_barrier_2_captured)
+ .await;
+ });
+ pending_barrier_2.wait().await;
+
+ // abort load and flush
+ handle_1.abort();
+ handle_2.abort();
+
+ // unblock
+ load_barrier_1.wait().await;
+
+ // load was still driven to completion
+ assert_eq!(inner.loaded(), vec![(vec![1], vec![true])],);
+ }
+
+ #[tokio::test]
+ async fn test_auto_flush_with_loader() {
+ let (inner, batch) = setup();
+
+ inner.mock_next(vec![1, 2], vec![String::from("foo"), String::from("bar")]);
+
+ assert_eq!(
+ batch
+ .auto_flush(vec![batch.load(1, true), batch.load(2, false)])
+ .await,
+ vec![String::from("foo"), String::from("bar")],
+ );
+
+ assert_eq!(inner.loaded(), vec![(vec![1, 2], vec![true, false])],);
+ }
+
+ #[tokio::test]
+ async fn test_auto_flush_integration_with_cache_driver() {
+ let (inner, batch) = setup();
+ let cache = CacheDriver::new(Arc::clone(&batch), HashMap::new());
+
+ inner.mock_next(vec![1, 2], vec![String::from("foo"), String::from("bar")]);
+ inner.mock_next(vec![3], vec![String::from("baz")]);
+
+ assert_eq!(
+ batch
+ .auto_flush(vec![cache.get(1, true), cache.get(2, false)])
+ .await,
+ vec![String::from("foo"), String::from("bar")],
+ );
+ assert_eq!(
+ batch
+ .auto_flush(vec![cache.get(2, true), cache.get(3, true)])
+ .await,
+ vec![String::from("bar"), String::from("baz")],
+ );
+
+ assert_eq!(
+ inner.loaded(),
+ vec![(vec![1, 2], vec![true, false]), (vec![3], vec![true])],
+ );
+ }
+
+ fn setup() -> (TestLoaderT, Arc<BatchLoader<u8, bool, String, TestLoaderT>>) {
+ let inner = TestLoaderT::default();
+ let batch = Arc::new(BatchLoader::new(Arc::clone(&inner)));
+ (inner, batch)
+ }
+}
diff --git a/cache_system/src/loader/mod.rs b/cache_system/src/loader/mod.rs
index f85f6bd180..6c429a7fc2 100644
--- a/cache_system/src/loader/mod.rs
+++ b/cache_system/src/loader/mod.rs
@@ -2,6 +2,7 @@
use async_trait::async_trait;
use std::{fmt::Debug, future::Future, hash::Hash, marker::PhantomData, sync::Arc};
+pub mod batch;
pub mod metrics;
#[cfg(test)]
|
8548ea3b314338113bfb58d2b10db186d71c431c
|
Fraser Savage
|
2023-09-18 17:33:38
|
Pass `IngestState` to WAL replay
|
This requires the `IngestState` and associated types to be public so
that WAL replay can be called by the benchmarker. The module containing
the `IngestState` is private and is only conditionally re-exported under
the benchmark feature as part of the `internal_implementation_details`
module.
| null |
refactor(ingester): Pass `IngestState` to WAL replay
This requires the `IngestState` and associated types to be public so
that WAL replay can be called by the benchmarker. The module containing
the `IngestState` is private and is only conditionally re-exported under
the benchmark feature as part of the `internal_implementation_details`
module.
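
For orientation, a self-contained sketch of the bit-flag state machine that the now-public `IngestState`/`IngestStateError` implement (simplified from the diff below; not the crate's exact API): each error variant owns one bit in an atomic word, so states can be set and unset independently and read lock-free.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Each error variant owns exactly one bit.
#[derive(Clone, Copy)]
enum IngestStateError {
    PersistSaturated = 1 << 0,
    GracefulStop = 1 << 1,
    DiskFull = 1 << 2,
}

#[derive(Default)]
struct IngestState {
    state: AtomicUsize,
}

impl IngestState {
    /// Returns true if this call set the bit (it was previously unset).
    fn set(&self, e: IngestStateError) -> bool {
        (self.state.fetch_or(e as usize, Ordering::Relaxed) & e as usize) == 0
    }

    /// Returns true if this call cleared the bit (it was previously set).
    fn unset(&self, e: IngestStateError) -> bool {
        (self.state.fetch_and(!(e as usize), Ordering::Relaxed) & e as usize) != 0
    }

    /// Ok only when no error state is currently set.
    fn read(&self) -> Result<(), &'static str> {
        if self.state.load(Ordering::Relaxed) == 0 {
            Ok(())
        } else {
            Err("ingest state has at least one error bit set")
        }
    }
}

fn main() {
    let s = IngestState::default();
    assert!(s.read().is_ok());
    assert!(s.set(IngestStateError::DiskFull));
    assert!(s.set(IngestStateError::PersistSaturated));
    assert!(s.set(IngestStateError::GracefulStop));
    assert!(s.read().is_err());
    assert!(s.unset(IngestStateError::DiskFull));
    assert!(s.read().is_err()); // other bits still set
}
```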
|
diff --git a/ingester/benches/wal.rs b/ingester/benches/wal.rs
index 4eaec4fd14..39203fd548 100644
--- a/ingester/benches/wal.rs
+++ b/ingester/benches/wal.rs
@@ -12,7 +12,7 @@ use ingester::internal_implementation_details::{
write::{
PartitionedData as PayloadPartitionedData, TableData as PayloadTableData, WriteOperation,
},
- DmlError, DmlSink, IngestOp, PartitionData, PartitionIter,
+ DmlError, DmlSink, IngestOp, IngestState, PartitionData, PartitionIter,
};
use wal::SequencedWalOp;
@@ -66,9 +66,15 @@ fn wal_replay_bench(c: &mut Criterion) {
let persist = MockPersistQueue::default();
// Replay the wal into the NOP.
- ingester::replay(&wal, &sink, Arc::new(persist), &metric::Registry::default())
- .await
- .expect("WAL replay error");
+ ingester::internal_implementation_details::replay(
+ &wal,
+ &sink,
+ Arc::new(persist),
+ Arc::new(IngestState::default()),
+ &metric::Registry::default(),
+ )
+ .await
+ .expect("WAL replay error");
},
// Use the WAL for one test invocation only, and re-create a new one
// for the next iteration.
diff --git a/ingester/src/ingest_state.rs b/ingester/src/ingest_state.rs
index 46d9144c8f..4a6076040e 100644
--- a/ingester/src/ingest_state.rs
+++ b/ingester/src/ingest_state.rs
@@ -14,13 +14,18 @@ use thiserror::Error;
/// Each error variant has a discriminant value that has exactly set one bit in
/// a usize.
#[derive(Debug, Error, Clone, Copy)]
-pub(crate) enum IngestStateError {
+pub enum IngestStateError {
+ /// Set when the ingester has exceeded its capacity for concurrent active
+ /// persist jobs.
#[error("ingester overloaded - persisting backlog")]
PersistSaturated = 1 << 0,
+ /// Set for the duration of the ingester's graceful shutdown procedure.
#[error("ingester is shutting down")]
GracefulStop = 1 << 1,
+ /// Indicates the ingester's disk is too full to safely accept further
+ /// writes.
#[error("ingester disk full - persisting write-ahead log")]
DiskFull = 1 << 2,
}
@@ -46,7 +51,7 @@ impl IngestStateError {
///
/// Error states can be unset by calling [`IngestState::unset()`].
#[derive(Debug, Default)]
-pub(crate) struct IngestState {
+pub struct IngestState {
/// The actual state value.
///
/// The value of this variable is a bitmap covering the [`IngestStateError`]
@@ -64,7 +69,7 @@ impl IngestState {
///
/// Returns true if this call set the error state to `error`, false if
/// `error` was already set.
- pub(crate) fn set(&self, error: IngestStateError) -> bool {
+ pub fn set(&self, error: IngestStateError) -> bool {
let set = error.as_bits();
let mut current = self.state.load(Ordering::Relaxed);
loop {
@@ -100,7 +105,7 @@ impl IngestState {
///
/// Returns true if this call unset the `error` state, false if `error` was
/// already unset.
- pub(crate) fn unset(&self, error: IngestStateError) -> bool {
+ pub fn unset(&self, error: IngestStateError) -> bool {
let unset = error.as_bits();
let mut current = self.state.load(Ordering::Relaxed);
loop {
@@ -139,7 +144,7 @@ impl IngestState {
/// 2. [`IngestStateError::DiskFull`]
/// 3. [`IngestStateError::PersistSaturated`].
///
- pub(crate) fn read(&self) -> Result<(), IngestStateError> {
+ pub fn read(&self) -> Result<(), IngestStateError> {
let current = self.state.load(Ordering::Relaxed);
if current != 0 {
@@ -158,7 +163,7 @@ impl IngestState {
///
/// If more than one error state is set, this follows the same precedence
/// rules as [`IngestState::read()`].
- pub(crate) fn read_with_exceptions<const N: usize>(
+ pub fn read_with_exceptions<const N: usize>(
&self,
exceptions: [IngestStateError; N],
) -> Result<(), IngestStateError> {
diff --git a/ingester/src/init.rs b/ingester/src/init.rs
index 746c6a09cc..5a9b629707 100644
--- a/ingester/src/init.rs
+++ b/ingester/src/init.rs
@@ -4,9 +4,10 @@ use gossip::{NopDispatcher, TopicInterests};
use gossip_parquet_file::tx::ParquetFileTx;
/// This needs to be pub for the benchmarks but should not be used outside the crate.
#[cfg(feature = "benches")]
-pub use wal_replay::*;
+pub mod wal_replay;
mod graceful_shutdown;
+#[cfg(not(feature = "benches"))]
mod wal_replay;
use std::{net::SocketAddr, num::NonZeroUsize, path::PathBuf, sync::Arc, time::Duration};
@@ -462,10 +463,15 @@ where
));
// Replay the WAL log files, if any.
- let max_sequence_number =
- wal_replay::replay(&wal, &buffer, Arc::clone(&persist_handle), &metrics)
- .await
- .map_err(|e| InitError::WalReplay(e.into()))?;
+ let max_sequence_number = wal_replay::replay(
+ &wal,
+ &buffer,
+ Arc::clone(&persist_handle),
+ Arc::clone(&ingest_state),
+ &metrics,
+ )
+ .await
+ .map_err(|e| InitError::WalReplay(e.into()))?;
// Build the chain of DmlSink that forms the write path.
let write_path = DmlSinkInstrumentation::new(
diff --git a/ingester/src/init/wal_replay.rs b/ingester/src/init/wal_replay.rs
index c20a529fda..a876550c5e 100644
--- a/ingester/src/init/wal_replay.rs
+++ b/ingester/src/init/wal_replay.rs
@@ -13,6 +13,7 @@ use crate::{
dml_payload::write::{PartitionedData, TableData, WriteOperation},
dml_payload::IngestOp,
dml_sink::{DmlError, DmlSink},
+ ingest_state::IngestState,
partition_iter::PartitionIter,
persist::{drain_buffer::persist_partitions, queue::PersistQueue},
};
@@ -103,6 +104,7 @@ pub async fn replay<W, T, P>(
wal: &W,
sink: &T,
persist: P,
+ ingest_state: Arc<IngestState>,
metrics: &metric::Registry,
) -> Result<Option<SequenceNumber>, WalReplayError>
where
@@ -180,8 +182,14 @@ where
);
// Replay this segment file, tracking successful replay in the metric
- let replay_result =
- replay_file(reader, sink, &ok_op_count_metric, &empty_op_count_metric).await;
+ let replay_result = replay_file(
+ reader,
+ sink,
+ &ok_op_count_metric,
+ &empty_op_count_metric,
+ &ingest_state,
+ )
+ .await;
if replay_result.is_ok() {
file_count_success_metric.inc(1);
}
@@ -282,6 +290,7 @@ async fn replay_file<T, F>(
sink: &T,
ok_op_count_metric: &U64Counter,
empty_op_count_metric: &U64Counter,
+ _ingest_state: &Arc<IngestState>,
) -> Result<Option<SequenceNumber>, WalReplayError>
where
T: DmlSink,
@@ -572,10 +581,17 @@ mod tests {
partitions: vec![Arc::new(Mutex::new(partition))],
};
+ let ingest_state = Arc::new(IngestState::default());
let metrics = metric::Registry::default();
- let max_sequence_number = replay(&wal, &mock_iter, Arc::clone(&persist), &metrics)
- .await
- .expect("failed to replay WAL");
+ let max_sequence_number = replay(
+ &wal,
+ &mock_iter,
+ Arc::clone(&persist),
+ Arc::clone(&ingest_state),
+ &metrics,
+ )
+ .await
+ .expect("failed to replay WAL");
assert_eq!(max_sequence_number, Some(SequenceNumber::new(43)));
@@ -794,10 +810,16 @@ mod tests {
};
let metrics = metric::Registry::default();
- let max_sequence_number = replay(&wal, &mock_iter, Arc::clone(&persist), &metrics)
- .await
- .expect("failed to replay WAL")
- .expect("should receive max sequence number");
+ let max_sequence_number = replay(
+ &wal,
+ &mock_iter,
+ Arc::clone(&persist),
+ Arc::new(IngestState::default()),
+ &metrics,
+ )
+ .await
+ .expect("failed to replay WAL")
+ .expect("should receive max sequence number");
assert_eq!(max_sequence_number, SequenceNumber::new(3));
assert!(wal.closed_segment_ids.lock().is_empty());
@@ -854,7 +876,14 @@ mod tests {
};
let metrics = metric::Registry::default();
- let replay_result = replay(&wal, &mock_iter, Arc::clone(&persist), &metrics).await;
+ let replay_result = replay(
+ &wal,
+ &mock_iter,
+ Arc::clone(&persist),
+ Arc::new(IngestState::default()),
+ &metrics,
+ )
+ .await;
assert_matches!(
replay_result,
Err(WalReplayError::ReadEntry(_, Some(id))) => {
diff --git a/ingester/src/lib.rs b/ingester/src/lib.rs
index 29419b7c1b..ef5872edee 100644
--- a/ingester/src/lib.rs
+++ b/ingester/src/lib.rs
@@ -237,6 +237,8 @@ pub mod internal_implementation_details {
pub use super::buffer_tree::*;
pub use super::dml_payload::*;
pub use super::dml_sink::*;
+ pub use super::ingest_state::*;
+ pub use super::init::wal_replay::*;
pub use super::partition_iter::*;
pub use super::persist::*;
}
|
f22b1e1a09e863430201ad520913f4c7ea633975
|
Andrew Lamb
|
2022-11-29 07:05:17
|
Update datafusion (to get memory limiting code) (#6246)
|
* chore: Update datafusion
* chore: Run cargo hakari tasks
* fix: Update to newer api
|
Co-authored-by: CircleCI[bot] <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
chore: Update datafusion (to get memory limiting code) (#6246)
* chore: Update datafusion
* chore: Run cargo hakari tasks
* fix: Update to newer api
Co-authored-by: CircleCI[bot] <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/Cargo.lock b/Cargo.lock
index 6086d56cf9..d42b7b7e10 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1238,7 +1238,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=a61615b2949bea9027eefe686613605e135780f2#a61615b2949bea9027eefe686613605e135780f2"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=dd3f72ad13df3e3ab2efde73eba546012eaf10fd#dd3f72ad13df3e3ab2efde73eba546012eaf10fd"
dependencies = [
"ahash 0.8.2",
"arrow",
@@ -1283,7 +1283,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=a61615b2949bea9027eefe686613605e135780f2#a61615b2949bea9027eefe686613605e135780f2"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=dd3f72ad13df3e3ab2efde73eba546012eaf10fd#dd3f72ad13df3e3ab2efde73eba546012eaf10fd"
dependencies = [
"arrow",
"chrono",
@@ -1295,7 +1295,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=a61615b2949bea9027eefe686613605e135780f2#a61615b2949bea9027eefe686613605e135780f2"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=dd3f72ad13df3e3ab2efde73eba546012eaf10fd#dd3f72ad13df3e3ab2efde73eba546012eaf10fd"
dependencies = [
"ahash 0.8.2",
"arrow",
@@ -1307,7 +1307,7 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=a61615b2949bea9027eefe686613605e135780f2#a61615b2949bea9027eefe686613605e135780f2"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=dd3f72ad13df3e3ab2efde73eba546012eaf10fd#dd3f72ad13df3e3ab2efde73eba546012eaf10fd"
dependencies = [
"arrow",
"async-trait",
@@ -1322,7 +1322,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=a61615b2949bea9027eefe686613605e135780f2#a61615b2949bea9027eefe686613605e135780f2"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=dd3f72ad13df3e3ab2efde73eba546012eaf10fd#dd3f72ad13df3e3ab2efde73eba546012eaf10fd"
dependencies = [
"ahash 0.8.2",
"arrow",
@@ -1351,7 +1351,7 @@ dependencies = [
[[package]]
name = "datafusion-proto"
version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=a61615b2949bea9027eefe686613605e135780f2#a61615b2949bea9027eefe686613605e135780f2"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=dd3f72ad13df3e3ab2efde73eba546012eaf10fd#dd3f72ad13df3e3ab2efde73eba546012eaf10fd"
dependencies = [
"arrow",
"datafusion",
@@ -1365,7 +1365,7 @@ dependencies = [
[[package]]
name = "datafusion-row"
version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=a61615b2949bea9027eefe686613605e135780f2#a61615b2949bea9027eefe686613605e135780f2"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=dd3f72ad13df3e3ab2efde73eba546012eaf10fd#dd3f72ad13df3e3ab2efde73eba546012eaf10fd"
dependencies = [
"arrow",
"datafusion-common",
@@ -1376,7 +1376,7 @@ dependencies = [
[[package]]
name = "datafusion-sql"
version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=a61615b2949bea9027eefe686613605e135780f2#a61615b2949bea9027eefe686613605e135780f2"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=dd3f72ad13df3e3ab2efde73eba546012eaf10fd#dd3f72ad13df3e3ab2efde73eba546012eaf10fd"
dependencies = [
"arrow",
"datafusion-common",
diff --git a/Cargo.toml b/Cargo.toml
index b62da9cf3d..efc4645947 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -113,8 +113,8 @@ license = "MIT OR Apache-2.0"
[workspace.dependencies]
arrow = { version = "27.0.0" }
arrow-flight = { version = "27.0.0" }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="a61615b2949bea9027eefe686613605e135780f2", default-features = false }
-datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="a61615b2949bea9027eefe686613605e135780f2" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="dd3f72ad13df3e3ab2efde73eba546012eaf10fd", default-features = false }
+datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="dd3f72ad13df3e3ab2efde73eba546012eaf10fd" }
hashbrown = { version = "0.13.1" }
parquet = { version = "27.0.0" }
diff --git a/arrow_util/src/optimize.rs b/arrow_util/src/optimize.rs
index a111b305cd..0d39ca03e9 100644
--- a/arrow_util/src/optimize.rs
+++ b/arrow_util/src/optimize.rs
@@ -234,7 +234,6 @@ mod tests {
};
use arrow::compute::{concat, concat_batches};
use arrow_flight::utils::flight_data_to_arrow_batch;
- use datafusion::physical_plan::limit::truncate_batch;
use std::iter::FromIterator;
#[test]
@@ -463,7 +462,7 @@ mod tests {
let (_, baseline_flight_batch) =
arrow_flight::utils::flight_data_from_arrow_batch(&batch, &options);
- let big_batch = truncate_batch(&batch, batch.num_rows() - 1);
+ let big_batch = batch.slice(0, batch.num_rows() - 1);
let optimized_big_batch =
optimize_record_batch(&big_batch, Arc::clone(&schema)).expect("failed to optimize");
let (_, optimized_big_flight_batch) =
@@ -474,7 +473,7 @@ mod tests {
optimized_big_flight_batch.data_body.len()
);
- let small_batch = truncate_batch(&batch, 1);
+ let small_batch = batch.slice(0, 1);
let optimized_small_batch =
optimize_record_batch(&small_batch, Arc::clone(&schema)).expect("failed to optimize");
let (_, optimized_small_flight_batch) =
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index fe710253dc..9dfd95be94 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -25,7 +25,7 @@ bytes = { version = "1", features = ["std"] }
chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "iana-time-zone", "serde", "std", "winapi"] }
crossbeam-utils = { version = "0.8", features = ["std"] }
crypto-common = { version = "0.1", default-features = false, features = ["std"] }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "a61615b2949bea9027eefe686613605e135780f2", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "dd3f72ad13df3e3ab2efde73eba546012eaf10fd", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] }
digest = { version = "0.10", features = ["alloc", "block-buffer", "core-api", "mac", "std", "subtle"] }
either = { version = "1", features = ["use_std"] }
fixedbitset = { version = "0.4", features = ["std"] }
|
fb6b3f66da6d1fc445f09c17988820953932a8af
|
Chunchun Ye
|
2023-04-12 11:58:44
|
support `database`, `bucket`, and `bucket-name` as grpc header names (#7511)
|
* feat: support `database`, `bucket`, and `bucket-name` as grpc header names
* chore: lint
* chore: update doc to accept `database`, `bucket`, and `bucket-name` as parameter names
* chore: update doc to only show `database` as the parameter name
* refactor: consolidate header names into a const vec and update comments on database
---------
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
feat(flightsql): support `database`, `bucket`, and `bucket-name` as grpc header names (#7511)
* feat: support `database`, `bucket`, and `bucket-name` as grpc header names
* chore: lint
* chore: update doc to accept `database`, `bucket`, and `bucket-name` as parameter names
* chore: update doc to only show `database` as the parameter name
* refactor: consolidate header names into a const vec and update comments on database
---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/docs/flightsql.md b/docs/flightsql.md
index c100846db4..0aee1a3ea8 100644
--- a/docs/flightsql.md
+++ b/docs/flightsql.md
@@ -2,19 +2,19 @@
InfluxDB IOx supports running SQL queries via [Apache Arrow Flight SQL](https://arrow.apache.org/docs/format/FlightSql.html)
-You can use either a native FlightSQL client as well as JDBC / ODBC Flight SQL drivers
+You can use either a native FlightSQL client as well as JDBC / ODBC Flight SQL drivers
## JDBC:
To use the JDBC driver with IOx:
1. Download the driver by following the link from [Maven](https://mvnrepository.com/artifact/org.apache.arrow/flight-sql/10.0.1) or [Dremio](https://www.dremio.com/drivers/jdbc/)
-2. Use a jdbc conection of the format: `jdbc:arrow-flight-sql://hostname:port?useEncryption=false&iox-namespace-name=NAME`.
+2. Use a jdbc conection of the format: `jdbc:arrow-flight-sql://hostname:port?useEncryption=false&database=NAME`
-`hostname:port` is the host / port on which the IOx query gRPC API is running (default port is 8082), and `NAME` is the namespace name (for example, `26f7e5a4b7be365b_917b97a92e883afc`)
+`hostname:port` is the host / port on which the IOx query gRPC API is running (default port is 8082), and `NAME` is the database name (for example, `26f7e5a4b7be365b_917b97a92e883afc`)
An example JDBC URL is:
```
-jdbc:arrow-flight-sql://localhost:8082?useEncryption=false&iox-namespace-name=26f7e5a4b7be365b_917b97a92e883afc
+jdbc:arrow-flight-sql://localhost:8082?useEncryption=false&database=26f7e5a4b7be365b_917b97a92e883afc
```
diff --git a/influxdb_iox/tests/end_to_end_cases/flightsql.rs b/influxdb_iox/tests/end_to_end_cases/flightsql.rs
index 8db60cf47b..7c64a13fdb 100644
--- a/influxdb_iox/tests/end_to_end_cases/flightsql.rs
+++ b/influxdb_iox/tests/end_to_end_cases/flightsql.rs
@@ -1357,8 +1357,153 @@ async fn authz() {
authz.close().await;
}
+/// Ensure that FligthSQL API supports the following grpc header names,
+/// in addition to the existing `iox-namespace-name`
+/// 1. database
+/// 2. bucket
+/// 3. bucket-name
+#[tokio::test]
+async fn flightsql_client_header_name_database() {
+ test_helpers::maybe_start_logging();
+ let database_url = maybe_skip_integration!();
+
+ let table_name = "the_table";
+
+ // Set up the cluster ====================================
+ let mut cluster = MiniCluster::create_shared2(database_url).await;
+
+ StepTest::new(
+ &mut cluster,
+ vec![
+ Step::WriteLineProtocol(format!(
+ "{table_name},tag1=A,tag2=B val=42i 123456\n\
+ {table_name},tag1=A,tag2=C val=43i 123457"
+ )),
+ Step::Custom(Box::new(move |state: &mut StepTestState| {
+ async move {
+ let mut client = flightsql_client_helper(state.cluster(), "database");
+
+ let stream = client.get_table_types().await.unwrap();
+ let batches = collect_stream(stream).await;
+
+ insta::assert_yaml_snapshot!(
+ batches_to_sorted_lines(&batches),
+ @r###"
+ ---
+ - +------------+
+ - "| table_type |"
+ - +------------+
+ - "| BASE TABLE |"
+ - "| VIEW |"
+ - +------------+
+ "###
+ );
+ }
+ .boxed()
+ })),
+ ],
+ )
+ .run()
+ .await
+}
+
+#[tokio::test]
+async fn flightsql_client_header_name_bucket() {
+ test_helpers::maybe_start_logging();
+ let database_url = maybe_skip_integration!();
+
+ let table_name = "the_table";
+
+ // Set up the cluster ====================================
+ let mut cluster = MiniCluster::create_shared2(database_url).await;
+
+ StepTest::new(
+ &mut cluster,
+ vec![
+ Step::WriteLineProtocol(format!(
+ "{table_name},tag1=A,tag2=B val=42i 123456\n\
+ {table_name},tag1=A,tag2=C val=43i 123457"
+ )),
+ Step::Custom(Box::new(move |state: &mut StepTestState| {
+ async move {
+ let mut client = flightsql_client_helper(state.cluster(), "bucket");
+
+ let stream = client.get_table_types().await.unwrap();
+ let batches = collect_stream(stream).await;
+
+ insta::assert_yaml_snapshot!(
+ batches_to_sorted_lines(&batches),
+ @r###"
+ ---
+ - +------------+
+ - "| table_type |"
+ - +------------+
+ - "| BASE TABLE |"
+ - "| VIEW |"
+ - +------------+
+ "###
+ );
+ }
+ .boxed()
+ })),
+ ],
+ )
+ .run()
+ .await
+}
+
+#[tokio::test]
+async fn flightsql_client_header_name_bucket_name() {
+ test_helpers::maybe_start_logging();
+ let database_url = maybe_skip_integration!();
+
+ let table_name = "the_table";
+
+ // Set up the cluster ====================================
+ let mut cluster = MiniCluster::create_shared2(database_url).await;
+
+ StepTest::new(
+ &mut cluster,
+ vec![
+ Step::WriteLineProtocol(format!(
+ "{table_name},tag1=A,tag2=B val=42i 123456\n\
+ {table_name},tag1=A,tag2=C val=43i 123457"
+ )),
+ Step::Custom(Box::new(move |state: &mut StepTestState| {
+ async move {
+ let mut client = flightsql_client_helper(state.cluster(), "bucket-name");
+
+ let stream = client.get_table_types().await.unwrap();
+ let batches = collect_stream(stream).await;
+
+ insta::assert_yaml_snapshot!(
+ batches_to_sorted_lines(&batches),
+ @r###"
+ ---
+ - +------------+
+ - "| table_type |"
+ - +------------+
+ - "| BASE TABLE |"
+ - "| VIEW |"
+ - +------------+
+ "###
+ );
+ }
+ .boxed()
+ })),
+ ],
+ )
+ .run()
+ .await
+}
+
/// Return a [`FlightSqlClient`] configured for use
fn flightsql_client(cluster: &MiniCluster) -> FlightSqlClient {
+ flightsql_client_helper(cluster, "iox-namespace-name")
+}
+
+/// Helper function for fn `flightsql_client` that returns a [`FlightSqlClient`] configured for use
+fn flightsql_client_helper(cluster: &MiniCluster, header_name: &str) -> FlightSqlClient {
let connection = cluster.querier().querier_grpc_connection();
let (channel, _headers) = connection.into_grpc_connection().into_parts();
@@ -1366,7 +1511,7 @@ fn flightsql_client(cluster: &MiniCluster) -> FlightSqlClient {
// Add namespace to client headers until it is fully supported by FlightSQL
let namespace = cluster.namespace();
- client.add_header("iox-namespace-name", namespace).unwrap();
+ client.add_header(header_name, namespace).unwrap();
client
}
diff --git a/influxdb_iox/tests/jdbc_client/README.md b/influxdb_iox/tests/jdbc_client/README.md
index 600a43b7d0..fe0a94879a 100644
--- a/influxdb_iox/tests/jdbc_client/README.md
+++ b/influxdb_iox/tests/jdbc_client/README.md
@@ -10,14 +10,15 @@ influxdb_iox -v
## Run the JDBC test
-To run the JDBC test program, specify the target namespace in the JDBC URL:
+To run the JDBC test program, specify the target database in the JDBC URL:
```shell
# run the jdbc client driver program, downloading the JDBC driver if needed
-./jdbc_client "jdbc:arrow-flight-sql://localhost:8082?useEncryption=false&iox-namespace-name=26f7e5a4b7be365b_917b97a92e883afc" query 'select * from cpu'
+./jdbc_client "jdbc:arrow-flight-sql://localhost:8082?useEncryption=false&database=26f7e5a4b7be365b_917b97a92e883afc" query 'select * from cpu'
```
# Cleanup:
+
Clean up any intermediate files (like JDBC driver)
```shell
diff --git a/service_grpc_flight/src/lib.rs b/service_grpc_flight/src/lib.rs
index 737df81e4c..75285d4c7e 100644
--- a/service_grpc_flight/src/lib.rs
+++ b/service_grpc_flight/src/lib.rs
@@ -37,12 +37,17 @@ use trace::{ctx::SpanContext, span::SpanExt};
use trace_http::ctx::{RequestLogContext, RequestLogContextExt};
use tracker::InstrumentedAsyncOwnedSemaphorePermit;
-/// The name of the grpc header that contains the target iox namespace
-/// name for FlightSQL requests.
+/// The supported names of the grpc header that contain the target database
+/// for FlightSQL requests.
///
/// See <https://lists.apache.org/thread/fd6r1n7vt91sg2c7fr35wcrsqz6x4645>
/// for discussion on adding support to FlightSQL itself.
-const IOX_FLIGHT_SQL_NAMESPACE_HEADER: &str = "iox-namespace-name";
+const IOX_FLIGHT_SQL_DATABASE_HEADERS: [&str; 4] = [
+ "database", // preferred
+ "bucket",
+ "bucket-name",
+ "iox-namespace-name", // deprecated
+];
#[allow(clippy::enum_variant_names)]
#[derive(Debug, Snafu)]
@@ -69,9 +74,24 @@ pub enum Error {
source: DataFusionError,
},
+ #[snafu(display(
+ "More than one headers are found in request: {:?}. \
+ Please include only one of them",
+ header_names
+ ))]
+ TooManyFlightSQLDatabases { header_names: Vec<String> },
+
#[snafu(display("no 'iox-namespace-name' header in request"))]
NoFlightSQLNamespace,
+ #[snafu(display(
+ "Invalid 'database' or 'bucket' or 'bucket-name' header in request: {}",
+ source
+ ))]
+ InvalidDatabaseHeader {
+ source: tonic::metadata::errors::ToStrError,
+ },
+
#[snafu(display("Invalid 'iox-namespace-name' header in request: {}", source))]
InvalidNamespaceHeader {
source: tonic::metadata::errors::ToStrError,
@@ -125,7 +145,9 @@ impl From<Error> for tonic::Status {
| Error::InvalidNamespaceName { .. } => info!(e=%err, msg),
Error::Query { .. } => info!(e=%err, msg),
Error::Optimize { .. }
+ | Error::TooManyFlightSQLDatabases { .. }
| Error::NoFlightSQLNamespace
+ | Error::InvalidDatabaseHeader { .. }
| Error::InvalidNamespaceHeader { .. }
| Error::Planning { .. }
| Error::Deserialization { .. }
@@ -152,7 +174,9 @@ impl Error {
Self::InvalidTicket { .. }
| Self::InvalidHandshake { .. }
| Self::Deserialization { .. }
+ | Self::TooManyFlightSQLDatabases { .. }
| Self::NoFlightSQLNamespace
+ | Self::InvalidDatabaseHeader { .. }
| Self::InvalidNamespaceHeader { .. }
| Self::InvalidNamespaceName { .. } => tonic::Code::InvalidArgument,
Self::Planning { source, .. } | Self::Query { source, .. } => {
@@ -709,10 +733,41 @@ fn cmd_from_descriptor(flight_descriptor: FlightDescriptor) -> Result<FlightSQLC
}
}
-/// Figure out the namespace for this request by checking
-/// the "iox-namespace-name=the_name";
+/// Figure out the database for this request by checking
+/// the "database=database_or_bucket_name" (preferred)
+/// or "bucket=database_or_bucket_name"
+/// or "bucket-name=database_or_bucket_name"
+/// or "iox-namespace-name=the_name" (deprecated);
+///
+/// Only one of the keys is accepted.
+///
+/// Note that `iox-namespace-name` is still accepted (rather than error) for
+/// some period of time until we are sure that all other software speaking
+/// FlightSQL is using the new header names.
fn get_flightsql_namespace(metadata: &MetadataMap) -> Result<String> {
- if let Some(v) = metadata.get(IOX_FLIGHT_SQL_NAMESPACE_HEADER) {
+ let mut found_header_keys: Vec<String> = vec![];
+ for key in IOX_FLIGHT_SQL_DATABASE_HEADERS {
+ if metadata.contains_key(key) {
+ found_header_keys.push(key.to_string());
+ }
+ }
+ if found_header_keys.len() > 1 {
+ return TooManyFlightSQLDatabasesSnafu {
+ header_names: found_header_keys,
+ }
+ .fail();
+ }
+
+ if let Some(v) = metadata.get(IOX_FLIGHT_SQL_DATABASE_HEADERS[0]) {
+ let v = v.to_str().context(InvalidDatabaseHeaderSnafu)?;
+ return Ok(v.to_string());
+ } else if let Some(v) = metadata.get(IOX_FLIGHT_SQL_DATABASE_HEADERS[1]) {
+ let v = v.to_str().context(InvalidDatabaseHeaderSnafu)?;
+ return Ok(v.to_string());
+ } else if let Some(v) = metadata.get(IOX_FLIGHT_SQL_DATABASE_HEADERS[2]) {
+ let v = v.to_str().context(InvalidDatabaseHeaderSnafu)?;
+ return Ok(v.to_string());
+ } else if let Some(v) = metadata.get(IOX_FLIGHT_SQL_DATABASE_HEADERS[3]) {
let v = v.to_str().context(InvalidNamespaceHeaderSnafu)?;
return Ok(v.to_string());
}
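
The hunk above accepts exactly one of the four supported header names and rejects ambiguous requests. A minimal standalone sketch of that lookup (using a plain `HashMap` in place of tonic's `MetadataMap` and `String` errors in place of the snafu variants, so it is an illustration rather than the service's actual code):

```rust
use std::collections::HashMap;

/// Header names accepted for selecting the target database, mirroring
/// IOX_FLIGHT_SQL_DATABASE_HEADERS in the diff above.
const DATABASE_HEADERS: [&str; 4] = ["database", "bucket", "bucket-name", "iox-namespace-name"];

/// Resolve the target database from request headers, modelled here as a
/// plain string map instead of a tonic `MetadataMap`.
fn resolve_database(headers: &HashMap<String, String>) -> Result<String, String> {
    let found: Vec<&str> = DATABASE_HEADERS
        .iter()
        .copied()
        .filter(|name| headers.contains_key(*name))
        .collect();

    match found.as_slice() {
        // No supported header at all: the request cannot be routed.
        [] => Err("no database header in request".to_string()),
        // Exactly one header: use its value as the database name.
        [name] => Ok(headers[*name].clone()),
        // Ambiguous request: mirrors the TooManyFlightSQLDatabases error.
        many => Err(format!("more than one database header in request: {many:?}")),
    }
}

fn main() {
    let mut headers = HashMap::new();
    headers.insert(
        "database".to_string(),
        "26f7e5a4b7be365b_917b97a92e883afc".to_string(),
    );
    assert_eq!(
        resolve_database(&headers).unwrap(),
        "26f7e5a4b7be365b_917b97a92e883afc"
    );
}
```

Keeping `iox-namespace-name` last in the list preserves compatibility for existing clients while the new names roll out.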
|
309177b750c72b3f510bab5b4dc67b8491e3d7dc
|
Marco Neumann
|
2023-03-08 17:36:27
|
phys. pred. pushdown to parquet (#7159)
|
For #6098.
| null |
feat: phys. pred. pushdown to parquet (#7159)
For #6098.
|
diff --git a/iox_query/src/physical_optimizer/predicate_pushdown.rs b/iox_query/src/physical_optimizer/predicate_pushdown.rs
index 93ba67432f..2d95b52ce4 100644
--- a/iox_query/src/physical_optimizer/predicate_pushdown.rs
+++ b/iox_query/src/physical_optimizer/predicate_pushdown.rs
@@ -13,6 +13,7 @@ use datafusion::{
physical_plan::{
empty::EmptyExec,
expressions::{BinaryExpr, Column},
+ file_format::ParquetExec,
filter::FilterExec,
rewrite::TreeNodeRewritable,
union::UnionExec,
@@ -59,6 +60,27 @@ impl PhysicalOptimizerRule for PredicatePushdown {
.collect::<Result<Vec<_>>>()?;
let new_union = UnionExec::new(new_inputs);
return Ok(Some(Arc::new(new_union)));
+ } else if let Some(child_parquet) = child_any.downcast_ref::<ParquetExec>() {
+ let existing = child_parquet
+ .predicate()
+ .map(split_conjunction)
+ .unwrap_or_default();
+ let both = conjunction(
+ existing
+ .into_iter()
+ .chain(split_conjunction(filter_exec.predicate()))
+ .cloned(),
+ );
+
+ let new_node = Arc::new(FilterExec::try_new(
+ Arc::clone(filter_exec.predicate()),
+ Arc::new(ParquetExec::new(
+ child_parquet.base_config().clone(),
+ both,
+ None,
+ )),
+ )?);
+ return Ok(Some(new_node));
} else if let Some(child_dedup) = child_any.downcast_ref::<DeduplicateExec>() {
let dedup_cols = child_dedup.sort_columns();
let (pushdown, no_pushdown): (Vec<_>, Vec<_>) =
@@ -143,11 +165,13 @@ fn conjunction(
mod tests {
use arrow::datatypes::{DataType, Field, Schema, SchemaRef};
use datafusion::{
+ datasource::object_store::ObjectStoreUrl,
logical_expr::Operator,
physical_expr::PhysicalSortExpr,
physical_plan::{
expressions::{BinaryExpr, Column, Literal},
- PhysicalExpr,
+ file_format::FileScanConfig,
+ PhysicalExpr, Statistics,
},
scalar::ScalarValue,
};
@@ -280,6 +304,47 @@ mod tests {
);
}
+ #[test]
+ fn test_parquet() {
+ let schema = schema();
+ let base_config = FileScanConfig {
+ object_store_url: ObjectStoreUrl::parse("test://").unwrap(),
+ file_schema: Arc::clone(&schema),
+ file_groups: vec![],
+ statistics: Statistics::default(),
+ projection: None,
+ limit: None,
+ table_partition_cols: vec![],
+ output_ordering: None,
+ infinite_source: false,
+ };
+ let plan = Arc::new(
+ FilterExec::try_new(
+ predicate_mixed(&schema),
+ Arc::new(ParquetExec::new(
+ base_config,
+ Some(predicate_tag(&schema)),
+ None,
+ )),
+ )
+ .unwrap(),
+ );
+ let opt = PredicatePushdown::default();
+ insta::assert_yaml_snapshot!(
+ OptimizationTest::new(plan, opt),
+ @r###"
+ ---
+ input:
+ - " FilterExec: tag1@0 = field@2"
+ - " ParquetExec: limit=None, partitions={0 groups: []}, predicate=tag1@0 = foo, pruning_predicate=tag1_min@0 <= foo AND foo <= tag1_max@1, projection=[tag1, tag2, field]"
+ output:
+ Ok:
+ - " FilterExec: tag1@0 = field@2"
+ - " ParquetExec: limit=None, partitions={0 groups: []}, predicate=tag1@0 = foo AND tag1@0 = field@2, pruning_predicate=tag1_min@0 <= foo AND foo <= tag1_max@1, projection=[tag1, tag2, field]"
+ "###
+ );
+ }
+
#[test]
fn test_dedup_no_pushdown() {
let schema = schema();
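
The new `ParquetExec` branch above merges the filter's predicate into the scan's existing predicate as a single conjunction while keeping the `FilterExec` on top. A toy sketch of that merge, with predicates modelled as plain strings rather than `PhysicalExpr`s (the helper names here are illustrative, not DataFusion's signatures):

```rust
/// Split a conjunctive predicate into its individual terms.
fn split_conjunction(pred: &str) -> Vec<String> {
    pred.split(" AND ").map(str::to_string).collect()
}

/// Re-join terms into a single conjunction, or `None` if there are none.
fn conjunction(terms: impl IntoIterator<Item = String>) -> Option<String> {
    let terms: Vec<String> = terms.into_iter().collect();
    if terms.is_empty() {
        None
    } else {
        Some(terms.join(" AND "))
    }
}

fn main() {
    // Predicate already attached to the ParquetExec...
    let existing = "tag1 = 'foo'";
    // ...and the predicate of the FilterExec sitting directly above it.
    let filter = "tag1 = field";

    let both = conjunction(
        split_conjunction(existing)
            .into_iter()
            .chain(split_conjunction(filter)),
    );

    // The scan is rebuilt with the combined predicate (used for pruning),
    // while the FilterExec stays in the plan to filter rows exactly.
    assert_eq!(both.as_deref(), Some("tag1 = 'foo' AND tag1 = field"));
}
```

This matches the `test_parquet` snapshot above, where the `ParquetExec` predicate becomes `tag1@0 = foo AND tag1@0 = field@2` while the `FilterExec` line is unchanged.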
|
728b7293b9bb223e845f2951a9d097bca34f5793
|
Fraser Savage
|
2023-04-11 15:36:46
|
Use read-through namespace cache for NamespaceResolver
|
The NamespaceResolver was using its own look-aside caching, very similar
to that of the DML handlers. This commit leverages the read-through cache
implementation to deduplicate more code and makes the read-through
behavioural expectation explicit for namespace autocreation.
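
A minimal sketch of the read-through shape the resolver now relies on, with hypothetical simplified types standing in for the router's `NamespaceCache`, catalog, and schema (the real implementation is async and instrumented):

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

/// Hypothetical, simplified stand-in for data_types::NamespaceSchema.
#[derive(Clone)]
struct NamespaceSchema {
    id: i64,
}

/// Hypothetical read-through cache: a look-aside map backed by a "catalog".
struct ReadThroughCache {
    inner: Mutex<HashMap<String, Arc<NamespaceSchema>>>,
    catalog: HashMap<String, NamespaceSchema>,
}

impl ReadThroughCache {
    fn get_schema(&self, namespace: &str) -> Result<Arc<NamespaceSchema>, String> {
        // Fast path: serve from the look-aside cache.
        if let Some(v) = self.inner.lock().unwrap().get(namespace) {
            return Ok(Arc::clone(v));
        }
        // Miss: fall back to the catalog, then populate the cache so later
        // callers (resolver and DML handlers alike) hit the fast path.
        let schema = self
            .catalog
            .get(namespace)
            .cloned()
            .map(Arc::new)
            .ok_or_else(|| format!("namespace {namespace} not found"))?;
        self.inner
            .lock()
            .unwrap()
            .insert(namespace.to_string(), Arc::clone(&schema));
        Ok(schema)
    }
}

fn main() {
    let cache = ReadThroughCache {
        inner: Mutex::new(HashMap::new()),
        catalog: HashMap::from([("bananas".to_string(), NamespaceSchema { id: 42 })]),
    };
    assert_eq!(cache.get_schema("bananas").unwrap().id, 42); // filled from catalog
    assert_eq!(cache.get_schema("bananas").unwrap().id, 42); // served from cache
}
```

With this in place, `NamespaceSchemaResolver::get_namespace_id` only needs the cache's `get_schema` and can drop its own catalog lookup, which is what the `namespace_resolver.rs` hunk below removes.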
| null |
feat(router): Use read-through namespace cache for NamespaceResolver
The NamespaceResolver was using its own look-aside caching, very similar
to that of the DML handlers. This commit leverages the read-through cache
implementation to deduplicate more code and makes the read-through
behavioural expectation explicit for namespace autocreation.
|
diff --git a/ioxd_router/src/lib.rs b/ioxd_router/src/lib.rs
index 27384fba94..e3a8e7bdbf 100644
--- a/ioxd_router/src/lib.rs
+++ b/ioxd_router/src/lib.rs
@@ -362,8 +362,7 @@ pub async fn create_router2_server_type(
// e. Namespace resolver
// Initialise the Namespace ID lookup + cache
- let namespace_resolver =
- NamespaceSchemaResolver::new(Arc::clone(&catalog), Arc::clone(&ns_cache));
+ let namespace_resolver = NamespaceSchemaResolver::new(Arc::clone(&ns_cache));
////////////////////////////////////////////////////////////////////////////
//
@@ -554,8 +553,7 @@ pub async fn create_router_server_type<'a>(
// e. Namespace resolver
// Initialise the Namespace ID lookup + cache
- let namespace_resolver =
- NamespaceSchemaResolver::new(Arc::clone(&catalog), Arc::clone(&ns_cache));
+ let namespace_resolver = NamespaceSchemaResolver::new(Arc::clone(&ns_cache));
////////////////////////////////////////////////////////////////////////////
//
diff --git a/router/src/namespace_cache/read_through_cache.rs b/router/src/namespace_cache/read_through_cache.rs
index 8f8cf96773..b9723be6cd 100644
--- a/router/src/namespace_cache/read_through_cache.rs
+++ b/router/src/namespace_cache/read_through_cache.rs
@@ -41,11 +41,11 @@ where
&self,
namespace: &NamespaceName<'static>,
) -> Result<Arc<NamespaceSchema>, Self::ReadError> {
- let mut repos = self.catalog.repositories().await;
-
match self.inner_cache.get_schema(namespace).await {
Ok(v) => Ok(v),
Err(CacheMissErr { .. }) => {
+ let mut repos = self.catalog.repositories().await;
+
let schema = match get_schema_by_name(
namespace,
repos.deref_mut(),
diff --git a/router/src/namespace_resolver.rs b/router/src/namespace_resolver.rs
index d24fde4328..b0caacb2a9 100644
--- a/router/src/namespace_resolver.rs
+++ b/router/src/namespace_resolver.rs
@@ -1,11 +1,7 @@
//! An trait to abstract resolving a[`NamespaceName`] to [`NamespaceId`], and a
//! collection of composable implementations.
-
-use std::{ops::DerefMut, sync::Arc};
-
use async_trait::async_trait;
use data_types::{NamespaceId, NamespaceName};
-use iox_catalog::interface::{get_schema_by_name, Catalog, SoftDeletedRows};
use observability_deps::tracing::*;
use thiserror::Error;
@@ -37,27 +33,25 @@ pub trait NamespaceResolver: std::fmt::Debug + Send + Sync {
) -> Result<NamespaceId, Error>;
}
-/// An implementation of [`NamespaceResolver`] that queries the [`Catalog`] to
-/// resolve a [`NamespaceId`], and populates the [`NamespaceCache`] as a side
-/// effect.
+/// An implementation of [`NamespaceResolver`] that resolves the [`NamespaceId`]
+/// for a given name through a [`NamespaceCache`].
#[derive(Debug)]
pub struct NamespaceSchemaResolver<C> {
- catalog: Arc<dyn Catalog>,
cache: C,
}
impl<C> NamespaceSchemaResolver<C> {
- /// Construct a new [`NamespaceSchemaResolver`] that fetches schemas from
- /// `catalog` and caches them in `cache`.
- pub fn new(catalog: Arc<dyn Catalog>, cache: C) -> Self {
- Self { catalog, cache }
+ /// Construct a new [`NamespaceSchemaResolver`] that resolves namespace IDs
+ /// using `cache`.
+ pub fn new(cache: C) -> Self {
+ Self { cache }
}
}
#[async_trait]
impl<C> NamespaceResolver for NamespaceSchemaResolver<C>
where
- C: NamespaceCache,
+ C: NamespaceCache<ReadError = iox_catalog::interface::Error>,
{
async fn get_namespace_id(
&self,
@@ -67,36 +61,7 @@ where
// from the global catalog (if it exists).
match self.cache.get_schema(namespace).await {
Ok(v) => Ok(v.id),
- Err(_) => {
- let mut repos = self.catalog.repositories().await;
-
- // Pull the schema from the global catalog or error if it does
- // not exist.
- let schema = get_schema_by_name(
- namespace,
- repos.deref_mut(),
- SoftDeletedRows::ExcludeDeleted,
- )
- .await
- .map_err(|e| {
- warn!(
- error=%e,
- %namespace,
- "failed to retrieve namespace schema"
- );
- Error::Lookup(e)
- })
- .map(Arc::new)?;
-
- // Cache population MAY race with other threads and lead to
- // overwrites, but an entry will always exist once inserted, and
- // the schemas will eventually converge.
- self.cache
- .put_schema(namespace.clone(), Arc::clone(&schema));
-
- trace!(%namespace, "schema cache populated");
- Ok(schema.id)
- }
+ Err(e) => return Err(Error::Lookup(e)),
}
}
}
@@ -107,17 +72,26 @@ mod tests {
use assert_matches::assert_matches;
use data_types::{NamespaceId, NamespaceSchema, QueryPoolId, TopicId};
- use iox_catalog::mem::MemCatalog;
+ use iox_catalog::{
+ interface::{Catalog, SoftDeletedRows},
+ mem::MemCatalog,
+ };
use super::*;
- use crate::namespace_cache::MemoryNamespaceCache;
+ use crate::namespace_cache::{MemoryNamespaceCache, ReadThroughCache};
#[tokio::test]
async fn test_cache_hit() {
let ns = NamespaceName::try_from("bananas").unwrap();
+ let metrics = Arc::new(metric::Registry::new());
+ let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(metrics));
+
// Prep the cache before the test to cause a hit
- let cache = Arc::new(MemoryNamespaceCache::default());
+ let cache = Arc::new(ReadThroughCache::new(
+ Arc::new(MemoryNamespaceCache::default()),
+ Arc::clone(&catalog),
+ ));
cache.put_schema(
ns.clone(),
NamespaceSchema {
@@ -131,10 +105,7 @@ mod tests {
},
);
- let metrics = Arc::new(metric::Registry::new());
- let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(metrics));
-
- let resolver = NamespaceSchemaResolver::new(Arc::clone(&catalog), Arc::clone(&cache));
+ let resolver = NamespaceSchemaResolver::new(Arc::clone(&cache));
// Drive the code under test
resolver
@@ -162,9 +133,12 @@ mod tests {
async fn test_cache_miss() {
let ns = NamespaceName::try_from("bananas").unwrap();
- let cache = Arc::new(MemoryNamespaceCache::default());
let metrics = Arc::new(metric::Registry::new());
let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(metrics));
+ let cache = Arc::new(ReadThroughCache::new(
+ Arc::new(MemoryNamespaceCache::default()),
+ Arc::clone(&catalog),
+ ));
// Create the namespace in the catalog
{
@@ -178,7 +152,7 @@ mod tests {
.expect("failed to setup catalog state");
}
- let resolver = NamespaceSchemaResolver::new(Arc::clone(&catalog), Arc::clone(&cache));
+ let resolver = NamespaceSchemaResolver::new(Arc::clone(&cache));
resolver
.get_namespace_id(&ns)
@@ -193,9 +167,12 @@ mod tests {
async fn test_cache_miss_soft_deleted() {
let ns = NamespaceName::try_from("bananas").unwrap();
- let cache = Arc::new(MemoryNamespaceCache::default());
let metrics = Arc::new(metric::Registry::new());
let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(metrics));
+ let cache = Arc::new(ReadThroughCache::new(
+ Arc::new(MemoryNamespaceCache::default()),
+ Arc::clone(&catalog),
+ ));
// Create the namespace in the catalog and mark it as deleted
{
@@ -214,7 +191,7 @@ mod tests {
.expect("failed to setup catalog state");
}
- let resolver = NamespaceSchemaResolver::new(Arc::clone(&catalog), Arc::clone(&cache));
+ let resolver = NamespaceSchemaResolver::new(Arc::clone(&cache));
let err = resolver
.get_namespace_id(&ns)
@@ -233,11 +210,14 @@ mod tests {
async fn test_cache_miss_does_not_exist() {
let ns = NamespaceName::try_from("bananas").unwrap();
- let cache = Arc::new(MemoryNamespaceCache::default());
let metrics = Arc::new(metric::Registry::new());
let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(metrics));
+ let cache = Arc::new(ReadThroughCache::new(
+ Arc::new(MemoryNamespaceCache::default()),
+ Arc::clone(&catalog),
+ ));
- let resolver = NamespaceSchemaResolver::new(Arc::clone(&catalog), Arc::clone(&cache));
+ let resolver = NamespaceSchemaResolver::new(Arc::clone(&cache));
let err = resolver
.get_namespace_id(&ns)
diff --git a/router/src/namespace_resolver/ns_autocreation.rs b/router/src/namespace_resolver/ns_autocreation.rs
index b59c077189..a0514ee098 100644
--- a/router/src/namespace_resolver/ns_autocreation.rs
+++ b/router/src/namespace_resolver/ns_autocreation.rs
@@ -79,7 +79,7 @@ impl<C, T> NamespaceAutocreation<C, T> {
#[async_trait]
impl<C, T> NamespaceResolver for NamespaceAutocreation<C, T>
where
- C: NamespaceCache,
+ C: NamespaceCache<ReadError = iox_catalog::interface::Error>, // The resolver relies on the cache for read-through cache behaviour
T: NamespaceResolver,
{
/// Force the creation of `namespace` if it does not already exist in the
@@ -153,7 +153,7 @@ mod tests {
use super::*;
use crate::{
- namespace_cache::MemoryNamespaceCache,
+ namespace_cache::{MemoryNamespaceCache, ReadThroughCache},
namespace_resolver::{mock::MockNamespaceResolver, NamespaceSchemaResolver},
};
@@ -166,8 +166,14 @@ mod tests {
let ns = NamespaceName::try_from("bananas").unwrap();
+ let metrics = Arc::new(metric::Registry::new());
+ let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(metrics));
+
// Prep the cache before the test to cause a hit
- let cache = Arc::new(MemoryNamespaceCache::default());
+ let cache = Arc::new(ReadThroughCache::new(
+ Arc::new(MemoryNamespaceCache::default()),
+ Arc::clone(&catalog),
+ ));
cache.put_schema(
ns.clone(),
NamespaceSchema {
@@ -181,9 +187,6 @@ mod tests {
},
);
- let metrics = Arc::new(metric::Registry::new());
- let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(metrics));
-
let creator = NamespaceAutocreation::new(
MockNamespaceResolver::default().with_mapping(ns.clone(), NAMESPACE_ID),
cache,
@@ -218,10 +221,14 @@ mod tests {
async fn test_cache_miss() {
let ns = NamespaceName::try_from("bananas").unwrap();
- let cache = Arc::new(MemoryNamespaceCache::default());
let metrics = Arc::new(metric::Registry::new());
let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(metrics));
+ let cache = Arc::new(ReadThroughCache::new(
+ Arc::new(MemoryNamespaceCache::default()),
+ Arc::clone(&catalog),
+ ));
+
let creator = NamespaceAutocreation::new(
MockNamespaceResolver::default().with_mapping(ns.clone(), NamespaceId::new(1)),
cache,
@@ -266,9 +273,12 @@ mod tests {
async fn test_reject() {
let ns = NamespaceName::try_from("bananas").unwrap();
- let cache = Arc::new(MemoryNamespaceCache::default());
let metrics = Arc::new(metric::Registry::new());
let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(metrics));
+ let cache = Arc::new(ReadThroughCache::new(
+ Arc::new(MemoryNamespaceCache::default()),
+ Arc::clone(&catalog),
+ ));
let creator = NamespaceAutocreation::new(
MockNamespaceResolver::default(),
@@ -302,13 +312,16 @@ mod tests {
async fn test_reject_exists_in_catalog() {
let ns = NamespaceName::try_from("bananas").unwrap();
- let cache = Arc::new(MemoryNamespaceCache::default());
let metrics = Arc::new(metric::Registry::new());
let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(metrics));
+ let cache = Arc::new(ReadThroughCache::new(
+ Arc::new(MemoryNamespaceCache::default()),
+ Arc::clone(&catalog),
+ ));
// First drive the population of the catalog
let creator = NamespaceAutocreation::new(
- NamespaceSchemaResolver::new(Arc::clone(&catalog), Arc::clone(&cache)),
+ NamespaceSchemaResolver::new(Arc::clone(&cache)),
Arc::clone(&cache),
Arc::clone(&catalog),
TopicId::new(42),
@@ -323,7 +336,7 @@ mod tests {
// Now try in "reject" mode.
let creator = NamespaceAutocreation::new(
- NamespaceSchemaResolver::new(Arc::clone(&catalog), Arc::clone(&cache)),
+ NamespaceSchemaResolver::new(Arc::clone(&cache)),
cache,
Arc::clone(&catalog),
TopicId::new(42),
diff --git a/router/tests/common/mod.rs b/router/tests/common/mod.rs
index b6de381f9d..0f5f642c52 100644
--- a/router/tests/common/mod.rs
+++ b/router/tests/common/mod.rs
@@ -170,8 +170,7 @@ impl TestContext {
parts: vec![TemplatePart::TimeFormat("%Y-%m-%d".to_owned())],
});
- let namespace_resolver =
- NamespaceSchemaResolver::new(Arc::clone(&catalog), Arc::clone(&ns_cache));
+ let namespace_resolver = NamespaceSchemaResolver::new(Arc::clone(&ns_cache));
let namespace_resolver = NamespaceAutocreation::new(
namespace_resolver,
Arc::clone(&ns_cache),
|
e8a480f5f613d2ab61821f692b744a803e6e44d1
|
Carol (Nichols || Goulding)
|
2023-05-08 11:32:44
|
Give up ownership of Column when adding to a table
|
This enables reuse of existing allocations, rather than borrowing the
column, creating new allocations, and then dropping the originals.
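
A minimal sketch of the signature change, with simplified stand-ins for the `data_types` structs (the real `Column` also carries a `table_id`):

```rust
use std::collections::BTreeMap;

#[derive(Debug, Clone, Copy, PartialEq)]
struct ColumnId(i64);

#[derive(Debug, Clone, Copy, PartialEq)]
enum ColumnType {
    Tag,
    Time,
}

/// Simplified stand-in for data_types::Column.
struct Column {
    id: ColumnId,
    name: String,
    column_type: ColumnType,
}

/// Simplified stand-in for data_types::ColumnSchema.
struct ColumnSchema {
    id: ColumnId,
    column_type: ColumnType,
}

#[derive(Default)]
struct TableSchema {
    columns: BTreeMap<String, ColumnSchema>,
}

impl TableSchema {
    /// Before this change the signature was `add_column(&mut self, col: &Column)`,
    /// which forced a fresh `String` allocation for the name. Taking `Column`
    /// by value moves its existing `name` allocation straight into the map.
    fn add_column(&mut self, col: Column) {
        let Column {
            id,
            name,
            column_type,
        } = col;
        let old = self.columns.insert(name, ColumnSchema { id, column_type });
        assert!(old.is_none(), "column already exists");
    }
}

fn main() {
    let mut table = TableSchema::default();
    table.add_column(Column {
        id: ColumnId(1),
        name: "time".to_string(),
        column_type: ColumnType::Time,
    });
    table.add_column(Column {
        id: ColumnId(2),
        name: "tag1".to_string(),
        column_type: ColumnType::Tag,
    });
    assert_eq!(table.columns["time"].column_type, ColumnType::Time);
    assert_eq!(table.columns["tag1"].id, ColumnId(2));
}
```

Callers such as `table.add_column(time_col)` in the diff simply hand over the `Column` they received from the catalog instead of borrowing it.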
| null |
fix: Give up ownership of Column when adding to a table
This enables reuse of existing allocations, rather than borrowing the
column, creating new allocations, and then dropping the originals.
|
diff --git a/data_types/src/columns.rs b/data_types/src/columns.rs
index 8990ffc440..45876e1a56 100644
--- a/data_types/src/columns.rs
+++ b/data_types/src/columns.rs
@@ -70,12 +70,8 @@ impl ColumnsByName {
/// # Panics
///
/// This method panics if a column of the same name already exists in `self`.
- pub fn add_column(
- &mut self,
- column_name: impl Into<String>,
- column_schema: impl Into<ColumnSchema>,
- ) {
- let old = self.0.insert(column_name.into(), column_schema.into());
+ pub fn add_column(&mut self, column_name: String, column_schema: ColumnSchema) {
+ let old = self.0.insert(column_name, column_schema);
assert!(old.is_none());
}
@@ -207,19 +203,6 @@ impl ColumnSchema {
}
}
-impl From<&Column> for ColumnSchema {
- fn from(c: &Column) -> Self {
- let Column {
- id, column_type, ..
- } = c;
-
- Self {
- id: *id,
- column_type: *column_type,
- }
- }
-}
-
/// The column data type
#[allow(missing_docs)]
#[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash, sqlx::Type)]
diff --git a/data_types/src/lib.rs b/data_types/src/lib.rs
index 6c7d4c6fee..4519cb9d10 100644
--- a/data_types/src/lib.rs
+++ b/data_types/src/lib.rs
@@ -405,8 +405,16 @@ impl TableSchema {
///
/// This method panics if a column of the same name already exists in
/// `self`.
- pub fn add_column(&mut self, col: &Column) {
- self.columns.add_column(&col.name, col);
+ pub fn add_column(&mut self, col: Column) {
+ let Column {
+ id,
+ name,
+ column_type,
+ ..
+ } = col;
+
+ let column_schema = ColumnSchema { id, column_type };
+ self.add_column_schema(name, column_schema);
}
/// Add the name and column schema to this table's schema.
@@ -415,8 +423,8 @@ impl TableSchema {
///
/// This method panics if a column of the same name already exists in
/// `self`.
- pub fn add_column_schema(&mut self, name: &str, column_schema: &ColumnSchema) {
- self.columns.add_column(name, column_schema.to_owned());
+ pub fn add_column_schema(&mut self, column_name: String, column_schema: ColumnSchema) {
+ self.columns.add_column(column_name, column_schema);
}
/// Estimated Size in bytes including `self`.
diff --git a/import/src/aggregate_tsm_schema/update_catalog.rs b/import/src/aggregate_tsm_schema/update_catalog.rs
index 077d97cfb9..173c5bcfa1 100644
--- a/import/src/aggregate_tsm_schema/update_catalog.rs
+++ b/import/src/aggregate_tsm_schema/update_catalog.rs
@@ -131,7 +131,7 @@ where
.columns()
.create_or_get("time", table.id, ColumnType::Time)
.await?;
- table.add_column(&time_col);
+ table.add_column(time_col);
table
}
};
@@ -442,7 +442,7 @@ mod tests {
.create_or_get("time", table.id, ColumnType::Time)
.await
.expect("column created");
- table.add_column(&time_col);
+ table.add_column(time_col);
let location_col = txn
.columns()
.create_or_get("city", table.id, ColumnType::Tag)
@@ -453,8 +453,8 @@ mod tests {
.create_or_get("temperature", table.id, ColumnType::F64)
.await
.expect("column created");
- table.add_column(&location_col);
- table.add_column(&temperature_col);
+ table.add_column(location_col);
+ table.add_column(temperature_col);
txn.commit().await.unwrap();
// merge with aggregate schema that has some overlap
@@ -534,13 +534,13 @@ mod tests {
.create_or_get("time", table.id, ColumnType::Time)
.await
.expect("column created");
- table.add_column(&time_col);
+ table.add_column(time_col);
let temperature_col = txn
.columns()
.create_or_get("temperature", table.id, ColumnType::F64)
.await
.expect("column created");
- table.add_column(&temperature_col);
+ table.add_column(temperature_col);
txn.commit().await.unwrap();
// merge with aggregate schema that has some issue that will trip a catalog error
@@ -599,13 +599,13 @@ mod tests {
.create_or_get("time", table.id, ColumnType::Time)
.await
.expect("column created");
- table.add_column(&time_col);
+ table.add_column(time_col);
let temperature_col = txn
.columns()
.create_or_get("temperature", table.id, ColumnType::F64)
.await
.expect("column created");
- table.add_column(&temperature_col);
+ table.add_column(temperature_col);
txn.commit().await.unwrap();
// merge with aggregate schema that has some issue that will trip a catalog error
diff --git a/iox_catalog/src/interface.rs b/iox_catalog/src/interface.rs
index ce7b33b37e..02a51c4633 100644
--- a/iox_catalog/src/interface.rs
+++ b/iox_catalog/src/interface.rs
@@ -598,7 +598,7 @@ where
for c in columns {
let (_, t) = table_id_to_schema.get_mut(&c.table_id).unwrap();
- t.add_column(&c);
+ t.add_column(c);
}
for (_, (table_name, schema)) in table_id_to_schema {
@@ -700,7 +700,7 @@ pub async fn list_schemas(
.entry(table.name.clone())
.or_insert_with(|| TableSchema::new_empty_from(table));
- table_schema.add_column(&column);
+ table_schema.add_column(column);
}
// The table map is no longer needed - immediately reclaim the memory.
diff --git a/iox_catalog/src/lib.rs b/iox_catalog/src/lib.rs
index 8988466f14..880e98e1f6 100644
--- a/iox_catalog/src/lib.rs
+++ b/iox_catalog/src/lib.rs
@@ -126,7 +126,7 @@ where
.create_or_get(TIME_COLUMN, table.id, ColumnType::Time)
.await?;
- table.add_column(&time_col);
+ table.add_column(time_col);
assert!(schema
.to_mut()
@@ -185,7 +185,7 @@ where
.create_or_get_many_unchecked(table.id, column_batch)
.await?
.into_iter()
- .for_each(|c| table.to_mut().add_column(&c));
+ .for_each(|c| table.to_mut().add_column(c));
}
if let Cow::Owned(table) = table {
diff --git a/router/src/namespace_cache/memory.rs b/router/src/namespace_cache/memory.rs
index 46e1912e8a..57f6b2a869 100644
--- a/router/src/namespace_cache/memory.rs
+++ b/router/src/namespace_cache/memory.rs
@@ -105,7 +105,7 @@ fn merge_schema_additive(
Some(new_table) => {
for (column_name, column) in old_table.columns.iter() {
if !new_table.contains_column_name(column_name) {
- new_table.add_column_schema(column_name, column);
+ new_table.add_column_schema(column_name.to_string(), *column);
}
}
}
@@ -218,9 +218,9 @@ mod tests {
};
let mut first_write_table_schema = TableSchema::new(table_id);
- first_write_table_schema.add_column(&column_1);
+ first_write_table_schema.add_column(column_1.clone());
let mut second_write_table_schema = TableSchema::new(table_id);
- second_write_table_schema.add_column(&column_2);
+ second_write_table_schema.add_column(column_2.clone());
// These MUST always be different
assert_ne!(first_write_table_schema, second_write_table_schema);
@@ -240,8 +240,8 @@ mod tests {
let want_namespace_schema = {
let mut want_table_schema = TableSchema::new(table_id);
- want_table_schema.add_column(&column_1);
- want_table_schema.add_column(&column_2);
+ want_table_schema.add_column(column_1);
+ want_table_schema.add_column(column_2);
NamespaceSchema {
tables: BTreeMap::from([(String::from(table_name), want_table_schema)]),
..schema_update_1.clone()
@@ -292,21 +292,21 @@ mod tests {
// Each table has been given a column to assert the table merge logic
// produces the correct metrics.
let mut table_1 = TableSchema::new(TableId::new(1));
- table_1.add_column(&Column {
+ table_1.add_column(Column {
id: ColumnId::new(1),
table_id: TableId::new(1),
name: "column_a".to_string(),
column_type: ColumnType::String,
});
let mut table_2 = TableSchema::new(TableId::new(2));
- table_2.add_column(&Column {
+ table_2.add_column(Column {
id: ColumnId::new(2),
table_id: TableId::new(2),
name: "column_b".to_string(),
column_type: ColumnType::String,
});
let mut table_3 = TableSchema::new(TableId::new(3));
- table_3.add_column(&Column {
+ table_3.add_column(Column {
id: ColumnId::new(3),
table_id: TableId::new(3),
name: "column_c".to_string(),
|
29ab3a2913c11749ebf3ba2b595fcfa417f30ac1
|
Dom Dwyer
|
2023-05-22 14:29:12
|
add missing lints to logfmt
|
Adds the standard lints to logfmt and fixes any lint failures.
Note that this doesn't include the normal "document things" lint, because
there's a load of missing docs.
| null |
refactor(lints): add missing lints to logfmt
Adds the standard lints to logfmt and fixes any lint failures.
Note that this doesn't include the normal "document things" lint, because
there's a load of missing docs.
|
diff --git a/logfmt/src/lib.rs b/logfmt/src/lib.rs
index b4fbfbc8e4..5c5ba8437a 100644
--- a/logfmt/src/lib.rs
+++ b/logfmt/src/lib.rs
@@ -1,4 +1,14 @@
#![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)]
+#![warn(
+ missing_copy_implementations,
+ missing_debug_implementations,
+ clippy::explicit_iter_loop,
+ // See https://github.com/influxdata/influxdb_iox/pull/1671
+ clippy::future_not_send,
+ clippy::clone_on_ref_ptr,
+ clippy::todo,
+ clippy::dbg_macro
+)]
use observability_deps::tracing::{
self,
@@ -19,6 +29,7 @@ use tracing_subscriber::{fmt::MakeWriter, layer::Context, registry::LookupSpan,
/// looked very small and did not (obviously) work with the tracing subscriber
///
/// [logfmt]: https://brandur.org/logfmt
+#[derive(Debug)]
pub struct LogFmtLayer<W>
where
W: for<'writer> MakeWriter<'writer>,
|
90a25a3ff02c9e3b9eb4a6d15436014c84d173fa
|
Christopher M. Wolff
|
2023-05-18 10:51:16
|
update DataFusion (#7825)
|
* chore: update DataFusion
* chore: Run cargo hakari tasks
---------
|
Co-authored-by: CircleCI[bot] <[email protected]>
|
chore: update DataFusion (#7825)
* chore: update DataFusion
* chore: Run cargo hakari tasks
---------
Co-authored-by: CircleCI[bot] <[email protected]>
|
diff --git a/Cargo.lock b/Cargo.lock
index ea507f641d..b80bd9f05f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -82,9 +82,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
[[package]]
name = "anstream"
-version = "0.3.0"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e579a7752471abc2a8268df8b20005e3eadd975f585398f17efcfd8d4927371"
+checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163"
dependencies = [
"anstyle",
"anstyle-parse",
@@ -121,9 +121,9 @@ dependencies = [
[[package]]
name = "anstyle-wincon"
-version = "1.0.0"
+version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4bcd8291a340dd8ac70e18878bc4501dd7b4ff970cfa21c207d36ece51ea88fd"
+checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188"
dependencies = [
"anstyle",
"windows-sys 0.48.0",
@@ -131,9 +131,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.70"
+version = "1.0.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4"
+checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"
[[package]]
name = "arrayref"
@@ -476,7 +476,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.16",
]
[[package]]
@@ -487,7 +487,7 @@ checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.16",
]
[[package]]
@@ -539,9 +539,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "axum"
-version = "0.6.15"
+version = "0.6.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3b32c5ea3aabaf4deb5f5ced2d688ec0844c881c9e6c696a8b769a05fc691e62"
+checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39"
dependencies = [
"async-trait",
"axum-core",
@@ -708,9 +708,9 @@ dependencies = [
[[package]]
name = "bumpalo"
-version = "3.12.0"
+version = "3.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535"
+checksum = "3c6ed94e98ecff0c12dd1b04c15ec0d7d9458ca8fe806cea6f12954efe74c63b"
[[package]]
name = "bytemuck"
@@ -830,9 +830,9 @@ dependencies = [
[[package]]
name = "ciborium"
-version = "0.2.0"
+version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f"
+checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926"
dependencies = [
"ciborium-io",
"ciborium-ll",
@@ -841,15 +841,15 @@ dependencies = [
[[package]]
name = "ciborium-io"
-version = "0.2.0"
+version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369"
+checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656"
[[package]]
name = "ciborium-ll"
-version = "0.2.0"
+version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b"
+checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b"
dependencies = [
"ciborium-io",
"half 1.8.2",
@@ -857,9 +857,9 @@ dependencies = [
[[package]]
name = "clap"
-version = "3.2.23"
+version = "3.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5"
+checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123"
dependencies = [
"bitflags",
"clap_lex 0.2.4",
@@ -927,7 +927,7 @@ dependencies = [
"heck",
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.16",
]
[[package]]
@@ -970,16 +970,6 @@ dependencies = [
"winapi",
]
-[[package]]
-name = "codespan-reporting"
-version = "0.11.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e"
-dependencies = [
- "termcolor",
- "unicode-width",
-]
-
[[package]]
name = "colorchoice"
version = "1.0.0"
@@ -1080,14 +1070,14 @@ dependencies = [
[[package]]
name = "console"
-version = "0.15.5"
+version = "0.15.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3d79fbe8970a77e3e34151cc13d3b3e248aa0faaecb9f6091fa07ebefe5ad60"
+checksum = "d0525278dce688103060006713371cedbad27186c7d913f33d866b498da0f595"
dependencies = [
"encode_unicode",
"lazy_static",
"libc",
- "windows-sys 0.42.0",
+ "windows-sys 0.45.0",
]
[[package]]
@@ -1163,18 +1153,18 @@ checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa"
[[package]]
name = "cpp_demangle"
-version = "0.4.0"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b446fd40bcc17eddd6a4a78f24315eb90afdb3334999ddfd4909985c47722442"
+checksum = "2c76f98bdfc7f66172e6c7065f981ebb576ffc903fe4c0561d9f0c2509226dc6"
dependencies = [
"cfg-if",
]
[[package]]
name = "cpufeatures"
-version = "0.2.6"
+version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "280a9f2d8b3a38871a3c8a46fb80db65e5e5ed97da80c4d08bf27fb63e35e181"
+checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58"
dependencies = [
"libc",
]
@@ -1213,7 +1203,7 @@ dependencies = [
"atty",
"cast",
"ciborium",
- "clap 3.2.23",
+ "clap 3.2.25",
"criterion-plot",
"futures",
"itertools",
@@ -1359,50 +1349,6 @@ dependencies = [
"syn 1.0.109",
]
-[[package]]
-name = "cxx"
-version = "1.0.94"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93"
-dependencies = [
- "cc",
- "cxxbridge-flags",
- "cxxbridge-macro",
- "link-cplusplus",
-]
-
-[[package]]
-name = "cxx-build"
-version = "1.0.94"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b"
-dependencies = [
- "cc",
- "codespan-reporting",
- "once_cell",
- "proc-macro2",
- "quote",
- "scratch",
- "syn 2.0.15",
-]
-
-[[package]]
-name = "cxxbridge-flags"
-version = "1.0.94"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb"
-
-[[package]]
-name = "cxxbridge-macro"
-version = "1.0.94"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.15",
-]
-
[[package]]
name = "dashmap"
version = "5.4.0"
@@ -1441,7 +1387,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "24.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=19b03240920ad63cac916b42951754c0337bdac8#19b03240920ad63cac916b42951754c0337bdac8"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=c24830a898b5f79f9058624fbcea7ba3c66ff065#c24830a898b5f79f9058624fbcea7ba3c66ff065"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -1490,7 +1436,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "24.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=19b03240920ad63cac916b42951754c0337bdac8#19b03240920ad63cac916b42951754c0337bdac8"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=c24830a898b5f79f9058624fbcea7ba3c66ff065#c24830a898b5f79f9058624fbcea7ba3c66ff065"
dependencies = [
"arrow",
"arrow-array",
@@ -1504,7 +1450,7 @@ dependencies = [
[[package]]
name = "datafusion-execution"
version = "24.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=19b03240920ad63cac916b42951754c0337bdac8#19b03240920ad63cac916b42951754c0337bdac8"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=c24830a898b5f79f9058624fbcea7ba3c66ff065#c24830a898b5f79f9058624fbcea7ba3c66ff065"
dependencies = [
"dashmap",
"datafusion-common",
@@ -1521,7 +1467,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "24.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=19b03240920ad63cac916b42951754c0337bdac8#19b03240920ad63cac916b42951754c0337bdac8"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=c24830a898b5f79f9058624fbcea7ba3c66ff065#c24830a898b5f79f9058624fbcea7ba3c66ff065"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -1532,7 +1478,7 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
version = "24.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=19b03240920ad63cac916b42951754c0337bdac8#19b03240920ad63cac916b42951754c0337bdac8"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=c24830a898b5f79f9058624fbcea7ba3c66ff065#c24830a898b5f79f9058624fbcea7ba3c66ff065"
dependencies = [
"arrow",
"async-trait",
@@ -1549,7 +1495,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "24.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=19b03240920ad63cac916b42951754c0337bdac8#19b03240920ad63cac916b42951754c0337bdac8"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=c24830a898b5f79f9058624fbcea7ba3c66ff065#c24830a898b5f79f9058624fbcea7ba3c66ff065"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -1581,7 +1527,7 @@ dependencies = [
[[package]]
name = "datafusion-proto"
version = "24.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=19b03240920ad63cac916b42951754c0337bdac8#19b03240920ad63cac916b42951754c0337bdac8"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=c24830a898b5f79f9058624fbcea7ba3c66ff065#c24830a898b5f79f9058624fbcea7ba3c66ff065"
dependencies = [
"arrow",
"chrono",
@@ -1595,7 +1541,7 @@ dependencies = [
[[package]]
name = "datafusion-row"
version = "24.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=19b03240920ad63cac916b42951754c0337bdac8#19b03240920ad63cac916b42951754c0337bdac8"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=c24830a898b5f79f9058624fbcea7ba3c66ff065#c24830a898b5f79f9058624fbcea7ba3c66ff065"
dependencies = [
"arrow",
"datafusion-common",
@@ -1606,7 +1552,7 @@ dependencies = [
[[package]]
name = "datafusion-sql"
version = "24.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=19b03240920ad63cac916b42951754c0337bdac8#19b03240920ad63cac916b42951754c0337bdac8"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=c24830a898b5f79f9058624fbcea7ba3c66ff065#c24830a898b5f79f9058624fbcea7ba3c66ff065"
dependencies = [
"arrow",
"arrow-schema",
@@ -1977,7 +1923,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.16",
]
[[package]]
@@ -2141,9 +2087,9 @@ dependencies = [
[[package]]
name = "h2"
-version = "0.3.18"
+version = "0.3.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21"
+checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782"
dependencies = [
"bytes",
"fnv",
@@ -2193,9 +2139,6 @@ name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
-dependencies = [
- "ahash 0.7.6",
-]
[[package]]
name = "hashbrown"
@@ -2208,11 +2151,11 @@ dependencies = [
[[package]]
name = "hashlink"
-version = "0.8.1"
+version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa"
+checksum = "0761a1b9491c4f2e3d66aa0f62d0fba0af9a0e2852e4d48ea506632a4b56e6aa"
dependencies = [
- "hashbrown 0.12.3",
+ "hashbrown 0.13.2",
]
[[package]]
@@ -2420,12 +2363,11 @@ dependencies = [
[[package]]
name = "iana-time-zone-haiku"
-version = "0.1.1"
+version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca"
+checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
dependencies = [
- "cxx",
- "cxx-build",
+ "cc",
]
[[package]]
@@ -3235,9 +3177,9 @@ dependencies = [
[[package]]
name = "js-sys"
-version = "0.3.61"
+version = "0.3.63"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730"
+checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790"
dependencies = [
"wasm-bindgen",
]
@@ -3320,9 +3262,9 @@ checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1"
[[package]]
name = "libm"
-version = "0.2.6"
+version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb"
+checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4"
[[package]]
name = "libsqlite3-sys"
@@ -3335,15 +3277,6 @@ dependencies = [
"vcpkg",
]
-[[package]]
-name = "link-cplusplus"
-version = "1.0.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5"
-dependencies = [
- "cc",
-]
-
[[package]]
name = "linked-hash-map"
version = "0.5.6"
@@ -3352,9 +3285,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"
[[package]]
name = "linux-raw-sys"
-version = "0.3.6"
+version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b64f40e5e03e0d54f03845c8197d0291253cdbedfb1cb46b13c2c117554a9f4c"
+checksum = "ece97ea872ece730aed82664c424eb4c8291e1ff2480247ccf7409044bc6479f"
[[package]]
name = "lock_api"
@@ -4099,9 +4032,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
[[package]]
name = "pest"
-version = "2.5.7"
+version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b1403e8401ad5dedea73c626b99758535b342502f8d1e361f4a2dd952749122"
+checksum = "e68e84bfb01f0507134eac1e9b410a12ba379d064eab48c50ba4ce329a527b70"
dependencies = [
"thiserror",
"ucd-trie",
@@ -4109,9 +4042,9 @@ dependencies = [
[[package]]
name = "pest_derive"
-version = "2.5.7"
+version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be99c4c1d2fc2769b1d00239431d711d08f6efedcecb8b6e30707160aee99c15"
+checksum = "6b79d4c71c865a25a4322296122e3924d30bc8ee0834c8bfc8b95f7f054afbfb"
dependencies = [
"pest",
"pest_generator",
@@ -4119,22 +4052,22 @@ dependencies = [
[[package]]
name = "pest_generator"
-version = "2.5.7"
+version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e56094789873daa36164de2e822b3888c6ae4b4f9da555a1103587658c805b1e"
+checksum = "6c435bf1076437b851ebc8edc3a18442796b30f1728ffea6262d59bbe28b077e"
dependencies = [
"pest",
"pest_meta",
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.16",
]
[[package]]
name = "pest_meta"
-version = "2.5.7"
+version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6733073c7cff3d8459fda0e42f13a047870242aed8b509fe98000928975f359e"
+checksum = "745a452f8eb71e39ffd8ee32b3c5f51d03845f99786fa9b68db6ff509c505411"
dependencies = [
"once_cell",
"pest",
@@ -4206,7 +4139,7 @@ checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.16",
]
[[package]]
@@ -4223,9 +4156,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "pkg-config"
-version = "0.3.26"
+version = "0.3.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160"
+checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964"
[[package]]
name = "pprof"
@@ -4339,9 +4272,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"
[[package]]
name = "proc-macro2"
-version = "1.0.56"
+version = "1.0.58"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435"
+checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8"
dependencies = [
"unicode-ident",
]
@@ -4547,9 +4480,9 @@ dependencies = [
[[package]]
name = "quote"
-version = "1.0.26"
+version = "1.0.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
+checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500"
dependencies = [
"proc-macro2",
]
@@ -4796,9 +4729,9 @@ dependencies = [
[[package]]
name = "rustc-demangle"
-version = "0.1.22"
+version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4a36c42d1873f9a77c53bde094f9664d9891bc604a45b4798fd2c389ed12e5b"
+checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"
[[package]]
name = "rustc_version"
@@ -4938,12 +4871,6 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
-[[package]]
-name = "scratch"
-version = "1.0.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1"
-
[[package]]
name = "sct"
version = "0.7.0"
@@ -4983,7 +4910,7 @@ checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.16",
]
[[package]]
@@ -5564,9 +5491,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.15"
+version = "2.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822"
+checksum = "a6f671d4b5ffdb8eadec19c0ae67fe2639df8684bd7bc4b83d986b8db549cf01"
dependencies = [
"proc-macro2",
"quote",
@@ -5598,15 +5525,6 @@ dependencies = [
"windows-sys 0.45.0",
]
-[[package]]
-name = "termcolor"
-version = "1.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6"
-dependencies = [
- "winapi-util",
-]
-
[[package]]
name = "termtree"
version = "0.4.1"
@@ -5689,7 +5607,7 @@ checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.16",
]
[[package]]
@@ -5817,7 +5735,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.16",
]
[[package]]
@@ -6091,7 +6009,7 @@ checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.16",
]
[[package]]
@@ -6392,9 +6310,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
-version = "0.2.84"
+version = "0.2.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b"
+checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73"
dependencies = [
"cfg-if",
"wasm-bindgen-macro",
@@ -6402,24 +6320,24 @@ dependencies = [
[[package]]
name = "wasm-bindgen-backend"
-version = "0.2.84"
+version = "0.2.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9"
+checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb"
dependencies = [
"bumpalo",
"log",
"once_cell",
"proc-macro2",
"quote",
- "syn 1.0.109",
+ "syn 2.0.16",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-futures"
-version = "0.4.34"
+version = "0.4.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454"
+checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e"
dependencies = [
"cfg-if",
"js-sys",
@@ -6429,9 +6347,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro"
-version = "0.2.84"
+version = "0.2.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5"
+checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@@ -6439,22 +6357,22 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
-version = "0.2.84"
+version = "0.2.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6"
+checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8"
dependencies = [
"proc-macro2",
"quote",
- "syn 1.0.109",
+ "syn 2.0.16",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
-version = "0.2.84"
+version = "0.2.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d"
+checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93"
[[package]]
name = "wasm-streams"
@@ -6471,9 +6389,9 @@ dependencies = [
[[package]]
name = "web-sys"
-version = "0.3.61"
+version = "0.3.63"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97"
+checksum = "3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2"
dependencies = [
"js-sys",
"wasm-bindgen",
@@ -6568,21 +6486,6 @@ dependencies = [
"windows-targets 0.48.0",
]
-[[package]]
-name = "windows-sys"
-version = "0.42.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7"
-dependencies = [
- "windows_aarch64_gnullvm 0.42.2",
- "windows_aarch64_msvc 0.42.2",
- "windows_i686_gnu 0.42.2",
- "windows_i686_msvc 0.42.2",
- "windows_x86_64_gnu 0.42.2",
- "windows_x86_64_gnullvm 0.42.2",
- "windows_x86_64_msvc 0.42.2",
-]
-
[[package]]
name = "windows-sys"
version = "0.45.0"
@@ -6717,9 +6620,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
[[package]]
name = "winnow"
-version = "0.4.1"
+version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae8970b36c66498d8ff1d66685dc86b91b29db0c7739899012f63a63814b4b28"
+checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699"
dependencies = [
"memchr",
]
@@ -6768,7 +6671,6 @@ dependencies = [
"futures-task",
"futures-util",
"getrandom",
- "hashbrown 0.12.3",
"hashbrown 0.13.2",
"heck",
"indexmap",
@@ -6812,7 +6714,7 @@ dependencies = [
"sqlx-core",
"sqlx-macros",
"syn 1.0.109",
- "syn 2.0.15",
+ "syn 2.0.16",
"thrift",
"tokio",
"tokio-stream",
diff --git a/Cargo.toml b/Cargo.toml
index 832a1c56ee..032740d288 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -117,8 +117,8 @@ license = "MIT OR Apache-2.0"
[workspace.dependencies]
arrow = { version = "39.0.0" }
arrow-flight = { version = "39.0.0" }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="19b03240920ad63cac916b42951754c0337bdac8", default-features = false }
-datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="19b03240920ad63cac916b42951754c0337bdac8" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="c24830a898b5f79f9058624fbcea7ba3c66ff065", default-features = false }
+datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="c24830a898b5f79f9058624fbcea7ba3c66ff065" }
hashbrown = { version = "0.13.2" }
parquet = { version = "39.0.0" }
tonic = { version = "0.9.2", features = ["tls", "tls-webpki-roots"] }
diff --git a/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet_20.sql.expected b/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet_20.sql.expected
index d4c59b4416..2e648981d8 100644
--- a/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet_20.sql.expected
+++ b/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet_20.sql.expected
@@ -20,6 +20,6 @@
| | ProjectionExec: expr=[f@1 as f] |
| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC,__chunk_order@0 ASC] |
-| | ParquetExec: file_groups={10 groups: [[1/1/1/00000000-0000-0000-0000-00000000000a.parquet], [1/1/1/00000000-0000-0000-0000-00000000000b.parquet], [1/1/1/00000000-0000-0000-0000-00000000000c.parquet], [1/1/1/00000000-0000-0000-0000-00000000000d.parquet], [1/1/1/00000000-0000-0000-0000-00000000000e.parquet], [1/1/1/00000000-0000-0000-0000-00000000000f.parquet], [1/1/1/00000000-0000-0000-0000-000000000010.parquet], [1/1/1/00000000-0000-0000-0000-000000000011.parquet], [1/1/1/00000000-0000-0000-0000-000000000012.parquet], [1/1/1/00000000-0000-0000-0000-000000000013.parquet]]}, projection=[__chunk_order, f, tag, time], output_ordering=[tag@2 ASC, time@3 ASC, __chunk_order@0 ASC] |
+| | ParquetExec: file_groups={10 groups: [[1/1/1/00000000-0000-0000-0000-00000000000a.parquet], [1/1/1/00000000-0000-0000-0000-00000000000b.parquet], [1/1/1/00000000-0000-0000-0000-00000000000c.parquet], [1/1/1/00000000-0000-0000-0000-00000000000d.parquet], [1/1/1/00000000-0000-0000-0000-00000000000e.parquet], ...]}, projection=[__chunk_order, f, tag, time], output_ordering=[tag@2 ASC, time@3 ASC, __chunk_order@0 ASC] |
| | |
----------
\ No newline at end of file
diff --git a/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet_20_and_ingester.sql.expected b/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet_20_and_ingester.sql.expected
index d0c77a20b6..a6a4ef65da 100644
--- a/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet_20_and_ingester.sql.expected
+++ b/influxdb_iox/tests/query_tests/cases/in/duplicates_parquet_20_and_ingester.sql.expected
@@ -23,6 +23,6 @@
| | UnionExec |
| | SortExec: expr=[tag@2 ASC,time@3 ASC,__chunk_order@0 ASC] |
| | RecordBatchesExec: batches_groups=1 batches=1 total_rows=1 |
-| | ParquetExec: file_groups={10 groups: [[1/1/1/00000000-0000-0000-0000-00000000000a.parquet], [1/1/1/00000000-0000-0000-0000-00000000000b.parquet], [1/1/1/00000000-0000-0000-0000-00000000000c.parquet], [1/1/1/00000000-0000-0000-0000-00000000000d.parquet], [1/1/1/00000000-0000-0000-0000-00000000000e.parquet], [1/1/1/00000000-0000-0000-0000-00000000000f.parquet], [1/1/1/00000000-0000-0000-0000-000000000010.parquet], [1/1/1/00000000-0000-0000-0000-000000000011.parquet], [1/1/1/00000000-0000-0000-0000-000000000012.parquet], [1/1/1/00000000-0000-0000-0000-000000000013.parquet]]}, projection=[__chunk_order, f, tag, time], output_ordering=[tag@2 ASC, time@3 ASC, __chunk_order@0 ASC] |
+| | ParquetExec: file_groups={10 groups: [[1/1/1/00000000-0000-0000-0000-00000000000a.parquet], [1/1/1/00000000-0000-0000-0000-00000000000b.parquet], [1/1/1/00000000-0000-0000-0000-00000000000c.parquet], [1/1/1/00000000-0000-0000-0000-00000000000d.parquet], [1/1/1/00000000-0000-0000-0000-00000000000e.parquet], ...]}, projection=[__chunk_order, f, tag, time], output_ordering=[tag@2 ASC, time@3 ASC, __chunk_order@0 ASC] |
| | |
----------
\ No newline at end of file
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index 7e8a7aad58..af3b433e63 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -30,9 +30,9 @@ bytes = { version = "1" }
chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "serde"] }
crossbeam-utils = { version = "0.8" }
crypto-common = { version = "0.1", default-features = false, features = ["std"] }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "19b03240920ad63cac916b42951754c0337bdac8" }
-datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "19b03240920ad63cac916b42951754c0337bdac8", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
-datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "19b03240920ad63cac916b42951754c0337bdac8", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "c24830a898b5f79f9058624fbcea7ba3c66ff065" }
+datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "c24830a898b5f79f9058624fbcea7ba3c66ff065", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
+datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "c24830a898b5f79f9058624fbcea7ba3c66ff065", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
digest = { version = "0.10", features = ["mac", "std"] }
either = { version = "1" }
fixedbitset = { version = "0.4" }
@@ -46,8 +46,7 @@ futures-sink = { version = "0.3" }
futures-task = { version = "0.3", default-features = false, features = ["std"] }
futures-util = { version = "0.3", features = ["channel", "io", "sink"] }
getrandom = { version = "0.2", default-features = false, features = ["std"] }
-hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13", features = ["raw"] }
-hashbrown-5ef9efb8ec2df382 = { package = "hashbrown", version = "0.12", features = ["raw"] }
+hashbrown = { version = "0.13", features = ["raw"] }
indexmap = { version = "1", default-features = false, features = ["std"] }
itertools = { version = "0.10" }
libc = { version = "0.2", features = ["extra_traits"] }
@@ -99,6 +98,7 @@ zstd-safe = { version = "6", default-features = false, features = ["arrays", "le
zstd-sys = { version = "2", default-features = false, features = ["legacy", "std", "zdict_builder"] }
[build-dependencies]
+ahash = { version = "0.8", default-features = false, features = ["runtime-rng"] }
base64-594e8ee84c453af0 = { package = "base64", version = "0.13" }
base64-647d43efb71741da = { package = "base64", version = "0.21" }
bitflags = { version = "1" }
@@ -118,7 +118,7 @@ futures-sink = { version = "0.3" }
futures-task = { version = "0.3", default-features = false, features = ["std"] }
futures-util = { version = "0.3", features = ["channel", "io", "sink"] }
getrandom = { version = "0.2", default-features = false, features = ["std"] }
-hashbrown-5ef9efb8ec2df382 = { package = "hashbrown", version = "0.12", features = ["raw"] }
+hashbrown = { version = "0.13", features = ["raw"] }
heck = { version = "0.4", features = ["unicode"] }
indexmap = { version = "1", default-features = false, features = ["std"] }
itertools = { version = "0.10" }
@@ -201,7 +201,7 @@ rustls = { version = "0.21", features = ["dangerous_configuration"] }
scopeguard = { version = "1" }
webpki = { version = "0.22", default-features = false, features = ["std"] }
winapi = { version = "0.3", default-features = false, features = ["basetsd", "consoleapi", "errhandlingapi", "fileapi", "handleapi", "impl-debug", "impl-default", "knownfolders", "minwinbase", "minwindef", "ntsecapi", "ntstatus", "objbase", "processenv", "shellapi", "shlobj", "std", "stringapiset", "synchapi", "timezoneapi", "winbase", "wincon", "winerror", "winnt", "winreg", "winuser", "ws2ipdef", "ws2tcpip", "wtypesbase"] }
-windows-sys-53888c27b7ba5cf4 = { package = "windows-sys", version = "0.45", features = ["Win32_Foundation", "Win32_Networking_WinSock", "Win32_Security", "Win32_Storage_FileSystem", "Win32_System_Console", "Win32_System_IO", "Win32_System_LibraryLoader", "Win32_System_Pipes", "Win32_System_SystemServices", "Win32_System_WindowsProgramming"] }
+windows-sys-53888c27b7ba5cf4 = { package = "windows-sys", version = "0.45", features = ["Win32_Foundation", "Win32_Networking_WinSock", "Win32_Security", "Win32_Storage_FileSystem", "Win32_System_Console", "Win32_System_IO", "Win32_System_LibraryLoader", "Win32_System_Pipes", "Win32_System_SystemServices", "Win32_System_WindowsProgramming", "Win32_UI_Input_KeyboardAndMouse"] }
windows-sys-c8eced492e86ede7 = { package = "windows-sys", version = "0.48", features = ["Win32_Foundation", "Win32_Networking_WinSock", "Win32_Security", "Win32_Storage_FileSystem", "Win32_System_Console", "Win32_System_IO", "Win32_System_Pipes", "Win32_System_SystemServices", "Win32_System_Threading", "Win32_System_WindowsProgramming", "Win32_UI_Shell"] }
[target.x86_64-pc-windows-msvc.build-dependencies]
@@ -209,7 +209,7 @@ once_cell = { version = "1", default-features = false, features = ["unstable"] }
scopeguard = { version = "1" }
webpki = { version = "0.22", default-features = false, features = ["std"] }
winapi = { version = "0.3", default-features = false, features = ["basetsd", "consoleapi", "errhandlingapi", "fileapi", "handleapi", "impl-debug", "impl-default", "knownfolders", "minwinbase", "minwindef", "ntsecapi", "ntstatus", "objbase", "processenv", "shellapi", "shlobj", "std", "stringapiset", "synchapi", "timezoneapi", "winbase", "wincon", "winerror", "winnt", "winreg", "winuser", "ws2ipdef", "ws2tcpip", "wtypesbase"] }
-windows-sys-53888c27b7ba5cf4 = { package = "windows-sys", version = "0.45", features = ["Win32_Foundation", "Win32_Networking_WinSock", "Win32_Security", "Win32_Storage_FileSystem", "Win32_System_Console", "Win32_System_IO", "Win32_System_LibraryLoader", "Win32_System_Pipes", "Win32_System_SystemServices", "Win32_System_WindowsProgramming"] }
+windows-sys-53888c27b7ba5cf4 = { package = "windows-sys", version = "0.45", features = ["Win32_Foundation", "Win32_Networking_WinSock", "Win32_Security", "Win32_Storage_FileSystem", "Win32_System_Console", "Win32_System_IO", "Win32_System_LibraryLoader", "Win32_System_Pipes", "Win32_System_SystemServices", "Win32_System_WindowsProgramming", "Win32_UI_Input_KeyboardAndMouse"] }
windows-sys-c8eced492e86ede7 = { package = "windows-sys", version = "0.48", features = ["Win32_Foundation", "Win32_Networking_WinSock", "Win32_Security", "Win32_Storage_FileSystem", "Win32_System_Console", "Win32_System_IO", "Win32_System_Pipes", "Win32_System_SystemServices", "Win32_System_Threading", "Win32_System_WindowsProgramming", "Win32_UI_Shell"] }
### END HAKARI SECTION
|
13ce6da3df3e363214770b6a1a5b81c8ac728fdb
|
Marco Neumann
|
2023-02-13 17:25:05
|
extract `FileClassifier` component (#6946)
|
* refactor: extract `FileClassifier` component
Make the driver slightly smaller. Also makes the "all-in-one" mode
easier to understand.
* docs: add some
---------
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
refactor: extract `FileClassifier` component (#6946)
* refactor: extract `FileClassifier` component
Make the driver slightly smaller. Also makes the "all-in-one" mode
easier to understand.
* docs: add some
---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/compactor2/src/components/file_classifier/all_at_once.rs b/compactor2/src/components/file_classifier/all_at_once.rs
new file mode 100644
index 0000000000..57ea8652e9
--- /dev/null
+++ b/compactor2/src/components/file_classifier/all_at_once.rs
@@ -0,0 +1,125 @@
+use std::fmt::Display;
+
+use data_types::{CompactionLevel, ParquetFile};
+
+use crate::{file_classification::FileClassification, partition_info::PartitionInfo};
+
+use super::FileClassifier;
+
+/// All files of level 0 and level 1 will be classified in one group to get compacted to level 1.
+#[derive(Debug, Default)]
+pub struct AllAtOnceFileClassifier;
+
+impl AllAtOnceFileClassifier {
+ pub fn new() -> Self {
+ Self::default()
+ }
+}
+
+impl Display for AllAtOnceFileClassifier {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "all_at_once")
+ }
+}
+
+impl FileClassifier for AllAtOnceFileClassifier {
+ fn classify(
+ &self,
+ _partition_info: &PartitionInfo,
+ files: Vec<ParquetFile>,
+ ) -> FileClassification {
+ // Check if there are files in Compaction::Initial level
+ if !files
+ .iter()
+ .any(|file| file.compaction_level == CompactionLevel::Initial)
+ {
+ panic!("Level-0 file not found in target level detection");
+ }
+
+ FileClassification {
+ target_level: CompactionLevel::FileNonOverlapped,
+ files_to_compact: files,
+ files_to_upgrade: vec![],
+ files_to_keep: vec![],
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use compactor2_test_utils::create_overlapped_files;
+ use iox_tests::ParquetFileBuilder;
+
+ use crate::test_utils::partition_info;
+
+ use super::*;
+
+ #[test]
+ fn test_display() {
+ assert_eq!(AllAtOnceFileClassifier::new().to_string(), "all_at_once",);
+ }
+
+ #[test]
+ #[should_panic(expected = "Level-0 file not found in target level detection")]
+ fn test_apply_empty() {
+ let classifier = AllAtOnceFileClassifier::new();
+
+ classifier.classify(&partition_info(), vec![]);
+ }
+
+ #[test]
+ #[should_panic(expected = "Level-0 file not found in target level detection")]
+ fn test_only_l1() {
+ let classifier = AllAtOnceFileClassifier::new();
+
+ let f1 = ParquetFileBuilder::new(1)
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .build();
+
+ classifier.classify(&partition_info(), vec![f1]);
+ }
+
+ #[test]
+ #[should_panic(expected = "Level-0 file not found in target level detection")]
+ fn test_only_l2() {
+ let classifier = AllAtOnceFileClassifier::new();
+
+ let f2 = ParquetFileBuilder::new(2)
+ .with_compaction_level(CompactionLevel::Final)
+ .build();
+
+ classifier.classify(&partition_info(), vec![f2]);
+ }
+
+ #[test]
+ #[should_panic(expected = "Level-0 file not found in target level detection")]
+ fn test_only_l1_l2() {
+ let classifier = AllAtOnceFileClassifier::new();
+
+ let f1 = ParquetFileBuilder::new(1)
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .build();
+
+ let f2 = ParquetFileBuilder::new(2)
+ .with_compaction_level(CompactionLevel::Final)
+ .build();
+
+ classifier.classify(&partition_info(), vec![f1, f2]);
+ }
+
+ #[test]
+ fn test_apply() {
+ let classifier = AllAtOnceFileClassifier::new();
+ let files = create_overlapped_files();
+ let classification = classifier.classify(&partition_info(), files.clone());
+ assert_eq!(
+ classification,
+ FileClassification {
+ target_level: CompactionLevel::FileNonOverlapped,
+ files_to_compact: files,
+ files_to_keep: vec![],
+ files_to_upgrade: vec![],
+ }
+ );
+ }
+}
diff --git a/compactor2/src/components/file_classifier/logging.rs b/compactor2/src/components/file_classifier/logging.rs
new file mode 100644
index 0000000000..d374ccbcc9
--- /dev/null
+++ b/compactor2/src/components/file_classifier/logging.rs
@@ -0,0 +1,58 @@
+use std::fmt::Display;
+
+use data_types::ParquetFile;
+use observability_deps::tracing::info;
+
+use crate::{file_classification::FileClassification, partition_info::PartitionInfo};
+
+use super::FileClassifier;
+
+#[derive(Debug)]
+pub struct LoggingFileClassifierWrapper<T>
+where
+ T: FileClassifier,
+{
+ inner: T,
+}
+
+impl<T> LoggingFileClassifierWrapper<T>
+where
+ T: FileClassifier,
+{
+ pub fn new(inner: T) -> Self {
+ Self { inner }
+ }
+}
+
+impl<T> Display for LoggingFileClassifierWrapper<T>
+where
+ T: FileClassifier,
+{
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "display({})", self.inner)
+ }
+}
+
+impl<T> FileClassifier for LoggingFileClassifierWrapper<T>
+where
+ T: FileClassifier,
+{
+ fn classify(
+ &self,
+ partition_info: &PartitionInfo,
+ files: Vec<ParquetFile>,
+ ) -> FileClassification {
+ let classification = self.inner.classify(partition_info, files);
+
+ info!(
+ partition_id = partition_info.partition_id.get(),
+ target_level = %classification.target_level,
+ files_to_compacts = classification.files_to_compact.len(),
+ files_to_upgrade = classification.files_to_upgrade.len(),
+ files_to_keep = classification.files_to_keep.len(),
+ "file classification"
+ );
+
+ classification
+ }
+}
diff --git a/compactor2/src/components/file_classifier/mod.rs b/compactor2/src/components/file_classifier/mod.rs
new file mode 100644
index 0000000000..de8d49ffed
--- /dev/null
+++ b/compactor2/src/components/file_classifier/mod.rs
@@ -0,0 +1,33 @@
+use std::{
+ fmt::{Debug, Display},
+ sync::Arc,
+};
+
+use data_types::ParquetFile;
+
+use crate::{file_classification::FileClassification, partition_info::PartitionInfo};
+
+pub mod all_at_once;
+pub mod logging;
+pub mod split_based;
+
+pub trait FileClassifier: Debug + Display + Send + Sync {
+ fn classify(
+ &self,
+ partition_info: &PartitionInfo,
+ files: Vec<ParquetFile>,
+ ) -> FileClassification;
+}
+
+impl<T> FileClassifier for Arc<T>
+where
+ T: FileClassifier + ?Sized,
+{
+ fn classify(
+ &self,
+ partition_info: &PartitionInfo,
+ files: Vec<ParquetFile>,
+ ) -> FileClassification {
+ self.as_ref().classify(partition_info, files)
+ }
+}
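The `impl FileClassifier for Arc<T>` blanket implementation above is what lets the component set hold a shared `Arc<dyn FileClassifier>` while wrappers such as `LoggingFileClassifierWrapper` stay generic over any classifier. Below is a minimal, self-contained sketch of the same pattern; the `Classifier` trait, `CountingClassifier`, and `LoggingWrapper` names are simplified stand-ins for illustration, not the crate's real types.

```rust
use std::{fmt::Debug, sync::Arc};

// Simplified stand-in trait mirroring the FileClassifier shape
// (Display and the real argument types are omitted for brevity).
trait Classifier: Debug + Send + Sync {
    fn classify(&self, files: Vec<String>) -> usize;
}

// Blanket impl: an Arc<T> (including Arc<dyn Classifier>) is itself a
// Classifier, so trait objects can be handed to generic wrappers.
impl<T> Classifier for Arc<T>
where
    T: Classifier + ?Sized,
{
    fn classify(&self, files: Vec<String>) -> usize {
        self.as_ref().classify(files)
    }
}

#[derive(Debug)]
struct CountingClassifier;

impl Classifier for CountingClassifier {
    fn classify(&self, files: Vec<String>) -> usize {
        files.len()
    }
}

// Generic wrapper, analogous to LoggingFileClassifierWrapper.
#[derive(Debug)]
struct LoggingWrapper<T>(T)
where
    T: Classifier;

impl<T> Classifier for LoggingWrapper<T>
where
    T: Classifier,
{
    fn classify(&self, files: Vec<String>) -> usize {
        let n = self.0.classify(files);
        println!("classified {} files", n);
        n
    }
}

fn main() {
    // Components store the classifier as a shared trait object...
    let shared: Arc<dyn Classifier> = Arc::new(CountingClassifier);
    // ...which the blanket impl lets us hand straight to a generic wrapper.
    let wrapped = LoggingWrapper(shared);
    assert_eq!(
        wrapped.classify(vec!["a.parquet".into(), "b.parquet".into()]),
        2
    );
}
```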
diff --git a/compactor2/src/components/file_classifier/split_based.rs b/compactor2/src/components/file_classifier/split_based.rs
new file mode 100644
index 0000000000..567ec64b46
--- /dev/null
+++ b/compactor2/src/components/file_classifier/split_based.rs
@@ -0,0 +1,149 @@
+use std::fmt::Display;
+
+use data_types::ParquetFile;
+
+use crate::{
+ components::{files_split::FilesSplit, target_level_chooser::TargetLevelChooser},
+ file_classification::FileClassification,
+ partition_info::PartitionInfo,
+};
+
+use super::FileClassifier;
+
+/// Use a combination of [`TargetLevelChooser`] and [`FilesSplit`] to build a [`FileClassification`].
+///
+/// This uses the following data flow:
+///
+/// ```text
+/// (files)--------------+->[target level chooser (T)]--->(target level)
+/// | :
+/// | :
+/// | +................................+
+/// | : :
+/// | : :
+/// V V :
+/// [target level split (FT)] :
+/// | | :
+/// | | :
+/// | +------------+ :
+/// | | :
+/// | | :
+/// | +............|...................+
+/// | : | :
+/// V V | :
+/// [non overlap split (FO)] | :
+/// | | | :
+/// | | | :
+/// | +------------+-->(files keep) :
+/// | :
+/// | :
+/// | +................................+
+/// | :
+/// V V
+/// [upgrade split (FU)]
+/// | |
+/// | |
+/// V V
+/// (file compact) (file upgrade)
+/// ```
+#[derive(Debug)]
+pub struct SplitBasedFileClassifier<T, FT, FO, FU>
+where
+ T: TargetLevelChooser,
+ FT: FilesSplit,
+ FO: FilesSplit,
+ FU: FilesSplit,
+{
+ target_level_chooser: T,
+ target_level_split: FT,
+ non_overlap_split: FO,
+ upgrade_split: FU,
+}
+
+impl<T, FT, FO, FU> SplitBasedFileClassifier<T, FT, FO, FU>
+where
+ T: TargetLevelChooser,
+ FT: FilesSplit,
+ FO: FilesSplit,
+ FU: FilesSplit,
+{
+ pub fn new(
+ target_level_chooser: T,
+ target_level_split: FT,
+ non_overlap_split: FO,
+ upgrade_split: FU,
+ ) -> Self {
+ Self {
+ target_level_chooser,
+ target_level_split,
+ non_overlap_split,
+ upgrade_split,
+ }
+ }
+}
+
+impl<T, FT, FO, FU> Display for SplitBasedFileClassifier<T, FT, FO, FU>
+where
+ T: TargetLevelChooser,
+ FT: FilesSplit,
+ FO: FilesSplit,
+ FU: FilesSplit,
+{
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(
+ f,
+ "split_based(target_level_chooser={}, target_level_split={}, non_overlap_split={}, upgrade_split={})",
+ self.target_level_chooser,
+ self.target_level_split,
+ self.non_overlap_split,
+ self.upgrade_split,
+ )
+ }
+}
+
+impl<T, FT, FO, FU> FileClassifier for SplitBasedFileClassifier<T, FT, FO, FU>
+where
+ T: TargetLevelChooser,
+ FT: FilesSplit,
+ FO: FilesSplit,
+ FU: FilesSplit,
+{
+ fn classify(
+ &self,
+ _partition_info: &PartitionInfo,
+ files: Vec<ParquetFile>,
+ ) -> FileClassification {
+ let files_to_compact = files;
+
+ // Detect target level to compact to
+ let target_level = self.target_level_chooser.detect(&files_to_compact);
+
+ // Split files into files_to_compact, files_to_upgrade, and files_to_keep
+ //
+ // Since output of one compaction is used as input of next compaction, all files that are not
+ // compacted or upgraded are still kept to consider in next round of compaction
+
+ // Split actual files to compact from its higher-target-level files
+ // The higher-target-level files are kept for next round of compaction
+ let (files_to_compact, mut files_to_keep) = self
+ .target_level_split
+ .apply(files_to_compact, target_level);
+
+ // To have efficient compaction performance, we do not need to compact eligible non-overlapped files
+ // Find eligible non-overlapped files and keep for next round of compaction
+ let (files_to_compact, non_overlapping_files) =
+ self.non_overlap_split.apply(files_to_compact, target_level);
+ files_to_keep.extend(non_overlapping_files);
+
+ // To have efficient compaction performance, we only need to upgrade (catalog update only) eligible files
+ let (files_to_compact, files_to_upgrade) =
+ self.upgrade_split.apply(files_to_compact, target_level);
+
+ FileClassification {
+ target_level,
+ files_to_compact,
+ files_to_upgrade,
+ files_to_keep,
+ }
+ }
+}
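To make the three-stage flow above concrete, here is a hedged, self-contained sketch of the same classification pipeline (target-level split, then non-overlap split, then upgrade split) using simplified stand-in types; `SimpleFile`, the `overlaps_others` flag, and the size threshold are illustrative assumptions, not the real `FilesSplit` implementations.

```rust
// Self-contained sketch of the split-based classification flow; all types
// and predicates below are simplified stand-ins, not the compactor2 APIs.

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum Level {
    L0,
    L1,
    L2,
}

#[derive(Debug)]
struct SimpleFile {
    id: u64,
    level: Level,
    size_bytes: u64,
    overlaps_others: bool, // stand-in for real overlap detection
}

#[derive(Debug)]
struct Classification {
    files_to_compact: Vec<SimpleFile>,
    files_to_upgrade: Vec<SimpleFile>,
    files_to_keep: Vec<SimpleFile>,
}

fn classify(files: Vec<SimpleFile>, target: Level, max_desired_size: u64) -> Classification {
    // Stage 1: files above the target level are kept for a later round.
    let (candidates, mut files_to_keep): (Vec<_>, Vec<_>) =
        files.into_iter().partition(|f| f.level <= target);

    // Stage 2: files already at the target level that overlap nothing else
    // can also be kept without rewriting them this round.
    let (candidates, non_overlapping): (Vec<_>, Vec<_>) = candidates
        .into_iter()
        .partition(|f| f.overlaps_others || f.level < target);
    files_to_keep.extend(non_overlapping);

    // Stage 3: sufficiently large candidates are upgraded (catalog-only);
    // the rest are compacted together into new files.
    let (files_to_compact, files_to_upgrade): (Vec<_>, Vec<_>) = candidates
        .into_iter()
        .partition(|f| f.size_bytes < max_desired_size);

    Classification {
        files_to_compact,
        files_to_upgrade,
        files_to_keep,
    }
}

fn main() {
    let files = vec![
        SimpleFile { id: 1, level: Level::L0, size_bytes: 10, overlaps_others: true },
        SimpleFile { id: 2, level: Level::L1, size_bytes: 50, overlaps_others: true },
        SimpleFile { id: 3, level: Level::L1, size_bytes: 40, overlaps_others: false },
        SimpleFile { id: 4, level: Level::L0, size_bytes: 500, overlaps_others: false },
        SimpleFile { id: 5, level: Level::L2, size_bytes: 900, overlaps_others: false },
    ];
    let c = classify(files, Level::L1, 100);
    // Expected: compact ids 1 and 2, upgrade id 4, keep ids 5 and 3.
    println!("{:#?}", c);
}
```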
diff --git a/compactor2/src/components/files_split/all_at_once_non_overlap_split.rs b/compactor2/src/components/files_split/all_at_once_non_overlap_split.rs
deleted file mode 100644
index fd8be4e35e..0000000000
--- a/compactor2/src/components/files_split/all_at_once_non_overlap_split.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-use std::fmt::Display;
-
-use data_types::{CompactionLevel, ParquetFile};
-
-use super::FilesSplit;
-
-#[derive(Debug)]
-/// In AllAtOnce version, we will compact all files at once and do not split anything
-pub struct AllAtOnceNonOverlapSplit {}
-
-impl AllAtOnceNonOverlapSplit {
- pub fn new() -> Self {
- Self {}
- }
-}
-
-impl Display for AllAtOnceNonOverlapSplit {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "Non-overlapping split for AllAtOnce version")
- }
-}
-
-impl FilesSplit for AllAtOnceNonOverlapSplit {
- fn apply(
- &self,
- files: Vec<data_types::ParquetFile>,
- _target_level: CompactionLevel,
- ) -> (Vec<ParquetFile>, Vec<ParquetFile>) {
- (files, vec![])
- }
-}
-
-#[cfg(test)]
-mod tests {
-
- use compactor2_test_utils::create_overlapped_files;
-
- use super::*;
-
- #[test]
- fn test_display() {
- assert_eq!(
- AllAtOnceNonOverlapSplit::new().to_string(),
- "Non-overlapping split for AllAtOnce version"
- );
- }
-
- #[test]
- fn test_apply_empty_files() {
- let files = vec![];
- let split = AllAtOnceNonOverlapSplit::new();
-
- let (overlap, non_overlap) = split.apply(files, CompactionLevel::FileNonOverlapped);
- assert_eq!(overlap.len(), 0);
- assert_eq!(non_overlap.len(), 0);
- }
-
- #[test]
- fn test_apply() {
- // Create 8 files with all levels
- let files = create_overlapped_files();
- assert_eq!(files.len(), 8);
-
- let split = AllAtOnceNonOverlapSplit::new();
- let (overlap, non_overlap) = split.apply(files.clone(), CompactionLevel::Initial);
- assert_eq!(overlap.len(), 8);
- assert_eq!(non_overlap.len(), 0);
-
- let (overlap, non_overlap) = split.apply(files.clone(), CompactionLevel::FileNonOverlapped);
- assert_eq!(overlap.len(), 8);
- assert_eq!(non_overlap.len(), 0);
-
- let (overlap, non_overlap) = split.apply(files, CompactionLevel::Final);
- assert_eq!(overlap.len(), 8);
- assert_eq!(non_overlap.len(), 0);
- }
-}
diff --git a/compactor2/src/components/files_split/all_at_once_target_level_split.rs b/compactor2/src/components/files_split/all_at_once_target_level_split.rs
deleted file mode 100644
index e6bcc33b60..0000000000
--- a/compactor2/src/components/files_split/all_at_once_target_level_split.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-use std::fmt::Display;
-
-use data_types::{CompactionLevel, ParquetFile};
-
-use super::FilesSplit;
-
-#[derive(Debug)]
-/// In AllAtOnce version, we will compact all files at once and do not split anything
-pub struct AllAtOnceTargetLevelSplit {}
-
-impl AllAtOnceTargetLevelSplit {
- pub fn new() -> Self {
- Self {}
- }
-}
-
-impl Display for AllAtOnceTargetLevelSplit {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "Target level split for AllAtOnce version")
- }
-}
-
-impl FilesSplit for AllAtOnceTargetLevelSplit {
- fn apply(
- &self,
- files: Vec<data_types::ParquetFile>,
- _target_level: CompactionLevel,
- ) -> (Vec<ParquetFile>, Vec<ParquetFile>) {
- (files, vec![])
- }
-}
-
-#[cfg(test)]
-mod tests {
-
- use compactor2_test_utils::create_overlapped_files;
-
- use super::*;
-
- #[test]
- fn test_display() {
- assert_eq!(
- AllAtOnceTargetLevelSplit::new().to_string(),
- "Target level split for AllAtOnce version"
- );
- }
-
- #[test]
- fn test_apply_empty_files() {
- let files = vec![];
- let split = AllAtOnceTargetLevelSplit::new();
-
- let (lower, higher) = split.apply(files, CompactionLevel::FileNonOverlapped);
- assert_eq!(lower.len(), 0);
- assert_eq!(higher.len(), 0);
- }
-
- #[test]
- fn test_apply() {
- // Create 8 files with all levels
- let files = create_overlapped_files();
- assert_eq!(files.len(), 8);
-
- let split = AllAtOnceTargetLevelSplit::new();
- let (lower, higher) = split.apply(files.clone(), CompactionLevel::Initial);
- assert_eq!(lower.len(), 8);
- assert_eq!(higher.len(), 0);
-
- let (lower, higher) = split.apply(files.clone(), CompactionLevel::FileNonOverlapped);
- assert_eq!(lower.len(), 8);
- assert_eq!(higher.len(), 0);
-
- let (lower, higher) = split.apply(files, CompactionLevel::Final);
- assert_eq!(lower.len(), 8);
- assert_eq!(higher.len(), 0);
- }
-}
diff --git a/compactor2/src/components/files_split/all_at_once_upgrade_split.rs b/compactor2/src/components/files_split/all_at_once_upgrade_split.rs
deleted file mode 100644
index db3811e72c..0000000000
--- a/compactor2/src/components/files_split/all_at_once_upgrade_split.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-use std::fmt::Display;
-
-use data_types::{CompactionLevel, ParquetFile};
-
-use super::FilesSplit;
-
-#[derive(Debug)]
-/// A [`FilesSplit`] that considers compacting all files each round
-pub struct AllAtOnceUpgradeSplit {}
-
-impl AllAtOnceUpgradeSplit {
- pub fn new() -> Self {
- Self {}
- }
-}
-
-impl Display for AllAtOnceUpgradeSplit {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "Upgrade split for AllAtOnce version")
- }
-}
-
-impl FilesSplit for AllAtOnceUpgradeSplit {
- fn apply(
- &self,
- files: Vec<data_types::ParquetFile>,
- _target_level: CompactionLevel,
- ) -> (Vec<ParquetFile>, Vec<ParquetFile>) {
- (files, vec![])
- }
-}
-
-#[cfg(test)]
-mod tests {
-
- use compactor2_test_utils::create_overlapped_files;
-
- use super::*;
-
- #[test]
- fn test_display() {
- assert_eq!(
- AllAtOnceUpgradeSplit::new().to_string(),
- "Upgrade split for AllAtOnce version"
- );
- }
-
- #[test]
- fn test_apply_empty_files() {
- let files = vec![];
- let split = AllAtOnceUpgradeSplit::new();
-
- let (compact, upgrade) = split.apply(files, CompactionLevel::FileNonOverlapped);
- assert_eq!(compact.len(), 0);
- assert_eq!(upgrade.len(), 0);
- }
-
- #[test]
- fn test_apply() {
- // Create 8 files with all levels
- let files = create_overlapped_files();
- assert_eq!(files.len(), 8);
-
- let split = AllAtOnceUpgradeSplit::new();
- let (compact, upgrade) = split.apply(files.clone(), CompactionLevel::Initial);
- assert_eq!(compact.len(), 8);
- assert_eq!(upgrade.len(), 0);
-
- let (compact, upgrade) = split.apply(files.clone(), CompactionLevel::FileNonOverlapped);
- assert_eq!(compact.len(), 8);
- assert_eq!(upgrade.len(), 0);
-
- let (compact, upgrade) = split.apply(files, CompactionLevel::Final);
- assert_eq!(compact.len(), 8);
- assert_eq!(upgrade.len(), 0);
- }
-}
diff --git a/compactor2/src/components/files_split/mod.rs b/compactor2/src/components/files_split/mod.rs
index bb75fa61cb..819154ff64 100644
--- a/compactor2/src/components/files_split/mod.rs
+++ b/compactor2/src/components/files_split/mod.rs
@@ -2,9 +2,6 @@ use std::fmt::{Debug, Display};
use data_types::{CompactionLevel, ParquetFile};
-pub mod all_at_once_non_overlap_split;
-pub mod all_at_once_target_level_split;
-pub mod all_at_once_upgrade_split;
pub mod target_level_non_overlap_split;
pub mod target_level_target_level_split;
pub mod target_level_upgrade_split;
diff --git a/compactor2/src/components/hardcoded.rs b/compactor2/src/components/hardcoded.rs
index 6a29350695..1a13731fb1 100644
--- a/compactor2/src/components/hardcoded.rs
+++ b/compactor2/src/components/hardcoded.rs
@@ -28,15 +28,16 @@ use super::{
},
df_planner::planner_v1::V1DataFusionPlanner,
divide_initial::single_branch::SingleBranchDivideInitial,
+ file_classifier::{
+ all_at_once::AllAtOnceFileClassifier, logging::LoggingFileClassifierWrapper,
+ split_based::SplitBasedFileClassifier, FileClassifier,
+ },
file_filter::{and::AndFileFilter, level_range::LevelRangeFileFilter},
files_filter::{chain::FilesFilterChain, per_file::PerFileFilesFilter, FilesFilter},
files_split::{
- all_at_once_non_overlap_split::AllAtOnceNonOverlapSplit,
- all_at_once_target_level_split::AllAtOnceTargetLevelSplit,
- all_at_once_upgrade_split::AllAtOnceUpgradeSplit,
target_level_non_overlap_split::TargetLevelNonOverlapSplit,
target_level_target_level_split::TargetLevelTargetLevelSplit,
- target_level_upgrade_split::TargetLevelUpgradeSplit, FilesSplit,
+ target_level_upgrade_split::TargetLevelUpgradeSplit,
},
id_only_partition_filter::{
and::AndIdOnlyPartitionFilter, shard::ShardPartitionFilter, IdOnlyPartitionFilter,
@@ -83,10 +84,7 @@ use super::{
round_split::all_now::AllNowRoundSplit,
scratchpad::{noop::NoopScratchpadGen, prod::ProdScratchpadGen, ScratchpadGen},
skipped_compactions_source::catalog::CatalogSkippedCompactionsSource,
- target_level_chooser::{
- all_at_once::AllAtOnceTargetLevelChooser, target_level::TargetLevelTargetLevelChooser,
- TargetLevelChooser,
- },
+ target_level_chooser::target_level::TargetLevelTargetLevelChooser,
Components,
};
@@ -308,10 +306,9 @@ pub fn hardcoded_components(config: &Config) -> Arc<Components> {
round_split: Arc::new(AllNowRoundSplit::new()),
divide_initial: Arc::new(SingleBranchDivideInitial::new()),
scratchpad_gen,
- target_level_chooser: version_specific_target_level_chooser(config),
- target_level_split: version_specific_target_level_split(config),
- non_overlap_split: version_specific_non_ovverlapping_split(config),
- upgrade_split: version_specific_upgrade_split(config),
+ file_classifier: Arc::new(LoggingFileClassifierWrapper::new(
+ version_specific_file_classifier(config),
+ )),
partition_resource_limit_filter: Arc::new(LoggingPartitionFilterWrapper::new(
MetricsPartitionFilterWrapper::new(
AndPartitionFilter::new(partition_resource_limit_filters),
@@ -357,38 +354,14 @@ fn version_specific_partition_filters(config: &Config) -> Vec<Arc<dyn PartitionF
}
}
-// Choose the terget level to compact to
-fn version_specific_target_level_chooser(config: &Config) -> Arc<dyn TargetLevelChooser> {
- match config.compact_version {
- AlgoVersion::AllAtOnce => Arc::new(AllAtOnceTargetLevelChooser::new()),
- AlgoVersion::TargetLevel => {
- Arc::new(TargetLevelTargetLevelChooser::new(OneLevelExist::new()))
- }
- }
-}
-
-// Split the files into `[<=target level]` and `[>target level]`
-fn version_specific_target_level_split(config: &Config) -> Arc<dyn FilesSplit> {
+fn version_specific_file_classifier(config: &Config) -> Arc<dyn FileClassifier> {
match config.compact_version {
- AlgoVersion::AllAtOnce => Arc::new(AllAtOnceTargetLevelSplit::new()),
- AlgoVersion::TargetLevel => Arc::new(TargetLevelTargetLevelSplit::new()),
- }
-}
-
-// Split the files into `[overlapping]` and `[non_overlapping]`
-fn version_specific_non_ovverlapping_split(config: &Config) -> Arc<dyn FilesSplit> {
- match config.compact_version {
- AlgoVersion::AllAtOnce => Arc::new(AllAtOnceNonOverlapSplit::new()),
- AlgoVersion::TargetLevel => Arc::new(TargetLevelNonOverlapSplit::new()),
- }
-}
-
-// Split the files into `[files_to_compact]` and `[files_to_upgrade]`
-fn version_specific_upgrade_split(config: &Config) -> Arc<dyn FilesSplit> {
- match config.compact_version {
- AlgoVersion::AllAtOnce => Arc::new(AllAtOnceUpgradeSplit::new()),
- AlgoVersion::TargetLevel => Arc::new(TargetLevelUpgradeSplit::new(
- config.max_desired_file_size_bytes,
+ AlgoVersion::AllAtOnce => Arc::new(AllAtOnceFileClassifier::new()),
+ AlgoVersion::TargetLevel => Arc::new(SplitBasedFileClassifier::new(
+ TargetLevelTargetLevelChooser::new(OneLevelExist::new()),
+ TargetLevelTargetLevelSplit::new(),
+ TargetLevelNonOverlapSplit::new(),
+ TargetLevelUpgradeSplit::new(config.max_desired_file_size_bytes),
)),
}
}
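Both match arms in `version_specific_file_classifier` erase their concrete classifier type behind the same `Arc<dyn FileClassifier>`, so the component wiring downstream is unaffected by which algorithm the config selects. A small sketch of that shape, using stand-in names (`Algo`, `Classify`, `version_specific`) that are illustrative only:

```rust
use std::sync::Arc;

// Stand-in config enum and trait; the real function matches on AlgoVersion
// and assembles the crate's own classifier types.
#[derive(Debug, Clone, Copy)]
enum Algo {
    AllAtOnce,
    TargetLevel,
}

trait Classify: Send + Sync {
    fn name(&self) -> String;
}

struct AllAtOnce;

impl Classify for AllAtOnce {
    fn name(&self) -> String {
        "all_at_once".to_string()
    }
}

struct SplitBased {
    max_desired_file_size_bytes: u64,
}

impl Classify for SplitBased {
    fn name(&self) -> String {
        format!("split_based(max={})", self.max_desired_file_size_bytes)
    }
}

// Each arm erases its concrete type behind the same trait object, so callers
// never need to know which algorithm was chosen.
fn version_specific(algo: Algo, max_desired_file_size_bytes: u64) -> Arc<dyn Classify> {
    match algo {
        Algo::AllAtOnce => Arc::new(AllAtOnce),
        Algo::TargetLevel => Arc::new(SplitBased {
            max_desired_file_size_bytes,
        }),
    }
}

fn main() {
    assert_eq!(version_specific(Algo::AllAtOnce, 100).name(), "all_at_once");
    assert_eq!(
        version_specific(Algo::TargetLevel, 100).name(),
        "split_based(max=100)"
    );
}
```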
diff --git a/compactor2/src/components/mod.rs b/compactor2/src/components/mod.rs
index 07de69a8b5..a4d2c16057 100644
--- a/compactor2/src/components/mod.rs
+++ b/compactor2/src/components/mod.rs
@@ -2,11 +2,11 @@ use std::sync::Arc;
use self::{
commit::Commit, df_plan_exec::DataFusionPlanExec, df_planner::DataFusionPlanner,
- divide_initial::DivideInitial, files_filter::FilesFilter, ir_planner::IRPlanner,
- parquet_files_sink::ParquetFilesSink, partition_done_sink::PartitionDoneSink,
- partition_files_source::PartitionFilesSource, partition_filter::PartitionFilter,
- partition_info_source::PartitionInfoSource, partition_stream::PartitionStream,
- round_split::RoundSplit, scratchpad::ScratchpadGen, target_level_chooser::TargetLevelChooser,
+ divide_initial::DivideInitial, file_classifier::FileClassifier, files_filter::FilesFilter,
+ ir_planner::IRPlanner, parquet_files_sink::ParquetFilesSink,
+ partition_done_sink::PartitionDoneSink, partition_files_source::PartitionFilesSource,
+ partition_filter::PartitionFilter, partition_info_source::PartitionInfoSource,
+ partition_stream::PartitionStream, round_split::RoundSplit, scratchpad::ScratchpadGen,
};
pub mod combos;
@@ -14,6 +14,7 @@ pub mod commit;
pub mod df_plan_exec;
pub mod df_planner;
pub mod divide_initial;
+pub mod file_classifier;
pub mod file_filter;
pub mod files_filter;
pub mod files_split;
@@ -73,12 +74,6 @@ pub struct Components {
pub divide_initial: Arc<dyn DivideInitial>,
/// Create intermediate temporary storage
pub scratchpad_gen: Arc<dyn ScratchpadGen>,
- /// Return the target compaction level for files
- pub target_level_chooser: Arc<dyn TargetLevelChooser>,
- /// Splits files based on their current compaction level and the target level.
- pub target_level_split: Arc<dyn files_split::FilesSplit>,
- /// Which files overlap and which do not
- pub non_overlap_split: Arc<dyn files_split::FilesSplit>,
- /// Which files should be upgraded and which should not
- pub upgrade_split: Arc<dyn files_split::FilesSplit>,
+ /// Classify files for each compaction branch.
+ pub file_classifier: Arc<dyn FileClassifier>,
}
diff --git a/compactor2/src/components/report.rs b/compactor2/src/components/report.rs
index ef8dd8848f..0a278afcff 100644
--- a/compactor2/src/components/report.rs
+++ b/compactor2/src/components/report.rs
@@ -100,10 +100,7 @@ pub fn log_components(components: &Components) {
round_split,
divide_initial,
scratchpad_gen,
- target_level_chooser,
- target_level_split,
- non_overlap_split,
- upgrade_split,
+ file_classifier,
} = components;
info!(
@@ -122,10 +119,7 @@ pub fn log_components(components: &Components) {
%round_split,
%divide_initial,
%scratchpad_gen,
- %target_level_chooser,
- %target_level_split,
- %non_overlap_split,
- %upgrade_split,
+ %file_classifier,
"component setup",
);
}
diff --git a/compactor2/src/components/target_level_chooser/all_at_once.rs b/compactor2/src/components/target_level_chooser/all_at_once.rs
deleted file mode 100644
index 0d97b2e6e6..0000000000
--- a/compactor2/src/components/target_level_chooser/all_at_once.rs
+++ /dev/null
@@ -1,134 +0,0 @@
-use std::fmt::Display;
-
-use data_types::CompactionLevel;
-
-use super::TargetLevelChooser;
-
-#[derive(Debug)]
-pub struct AllAtOnceTargetLevelChooser {}
-
-impl AllAtOnceTargetLevelChooser {
- pub fn new() -> Self {
- Self {}
- }
-}
-
-impl Display for AllAtOnceTargetLevelChooser {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "Target level detection for AllAtOnce version",)
- }
-}
-
-impl TargetLevelChooser for AllAtOnceTargetLevelChooser {
- // For AllAtOnce version, we only compact (L0s + L1s) to L1s
- // The target level is always 1 and there must be at least one file in L0
- fn detect(&self, files: &[data_types::ParquetFile]) -> CompactionLevel {
- // Check if there are files in Compaction::Initial level
- if files
- .iter()
- .any(|file| file.compaction_level == CompactionLevel::Initial)
- {
- return CompactionLevel::FileNonOverlapped;
- }
-
- panic!("Level-0 file not found in target level detection");
- }
-}
-
-#[cfg(test)]
-mod tests {
- use iox_tests::ParquetFileBuilder;
-
- use super::*;
-
- #[test]
- fn test_display() {
- assert_eq!(
- AllAtOnceTargetLevelChooser::new().to_string(),
- "Target level detection for AllAtOnce version"
- );
- }
-
- #[test]
- #[should_panic(expected = "Level-0 file not found in target level detection")]
- fn test_apply_empty() {
- let target_level_chooser = AllAtOnceTargetLevelChooser::new();
-
- target_level_chooser.detect(&[]);
- }
-
- #[test]
- #[should_panic(expected = "Level-0 file not found in target level detection")]
- fn test_only_l1() {
- let target_level_chooser = AllAtOnceTargetLevelChooser::new();
-
- let f1 = ParquetFileBuilder::new(1)
- .with_compaction_level(CompactionLevel::FileNonOverlapped)
- .build();
-
- target_level_chooser.detect(&[f1]);
- }
-
- #[test]
- #[should_panic(expected = "Level-0 file not found in target level detection")]
- fn test_only_l2() {
- let target_level_chooser = AllAtOnceTargetLevelChooser::new();
-
- let f2 = ParquetFileBuilder::new(2)
- .with_compaction_level(CompactionLevel::Final)
- .build();
-
- target_level_chooser.detect(&[f2]);
- }
-
- #[test]
- #[should_panic(expected = "Level-0 file not found in target level detection")]
- fn test_only_l1_l2() {
- let target_level_chooser = AllAtOnceTargetLevelChooser::new();
-
- let f1 = ParquetFileBuilder::new(1)
- .with_compaction_level(CompactionLevel::FileNonOverlapped)
- .build();
-
- let f2 = ParquetFileBuilder::new(2)
- .with_compaction_level(CompactionLevel::Final)
- .build();
-
- target_level_chooser.detect(&[f1, f2]);
- }
-
- #[test]
- fn test_apply() {
- let target_level_chooser = AllAtOnceTargetLevelChooser::new();
-
- let f0 = ParquetFileBuilder::new(0)
- .with_compaction_level(CompactionLevel::Initial)
- .build();
- let f1 = ParquetFileBuilder::new(1)
- .with_compaction_level(CompactionLevel::FileNonOverlapped)
- .build();
- let f2 = ParquetFileBuilder::new(2)
- .with_compaction_level(CompactionLevel::Final)
- .build();
-
- // list of one
- assert_eq!(
- target_level_chooser.detect(&[f0.clone()]),
- CompactionLevel::FileNonOverlapped
- );
-
- // list of many
- assert_eq!(
- target_level_chooser.detect(&[f1.clone(), f0.clone()]),
- CompactionLevel::FileNonOverlapped
- );
- assert_eq!(
- target_level_chooser.detect(&[f2.clone(), f0.clone()]),
- CompactionLevel::FileNonOverlapped
- );
- assert_eq!(
- target_level_chooser.detect(&[f2, f0, f1]),
- CompactionLevel::FileNonOverlapped
- );
- }
-}
diff --git a/compactor2/src/components/target_level_chooser/mod.rs b/compactor2/src/components/target_level_chooser/mod.rs
index 2e1fbd09b7..92716a96f7 100644
--- a/compactor2/src/components/target_level_chooser/mod.rs
+++ b/compactor2/src/components/target_level_chooser/mod.rs
@@ -2,7 +2,6 @@ use std::fmt::{Debug, Display};
use data_types::CompactionLevel;
-pub mod all_at_once;
pub mod target_level;
pub trait TargetLevelChooser: Debug + Display + Send + Sync {
diff --git a/compactor2/src/driver.rs b/compactor2/src/driver.rs
index 626b8f7fac..acdc83e5f4 100644
--- a/compactor2/src/driver.rs
+++ b/compactor2/src/driver.rs
@@ -218,7 +218,7 @@ async fn try_compact_partition(
// Identify the target level and files that should be
// compacted together, upgraded, and kept for next round of
// compaction
- let compaction_plan = build_compaction_plan(branch, Arc::clone(&components))?;
+ let file_classification = components.file_classifier.classify(partition_info, branch);
// Cannot run this plan and skip this partition because of over limit of input num_files or size.
// The partition_resource_limit_filter will throw an error if one of the limits hit and will lead
@@ -229,7 +229,7 @@ async fn try_compact_partition(
// of conidtions even with limited resource. Then we will remove this resrouce limit check.
if !components
.partition_resource_limit_filter
- .apply(partition_id, &compaction_plan.files_to_compact)
+ .apply(partition_id, &file_classification.files_to_compact)
.await?
{
return Ok(());
@@ -237,10 +237,10 @@ async fn try_compact_partition(
// Compact
let created_file_params = run_compaction_plan(
- &compaction_plan.files_to_compact,
+ &file_classification.files_to_compact,
partition_info,
&components,
- compaction_plan.target_level,
+ file_classification.target_level,
Arc::clone(&job_semaphore),
scratchpad_ctx,
)
@@ -258,106 +258,23 @@ async fn try_compact_partition(
let (created_files, upgraded_files) = update_catalog(
Arc::clone(&components),
partition_id,
- compaction_plan.files_to_compact,
- compaction_plan.files_to_upgrade,
+ file_classification.files_to_compact,
+ file_classification.files_to_upgrade,
created_file_params,
- compaction_plan.target_level,
+ file_classification.target_level,
)
.await;
// Extend created files, upgraded files and files_to_keep to files_next
files_next.extend(created_files);
files_next.extend(upgraded_files);
- files_next.extend(compaction_plan.files_to_keep);
+ files_next.extend(file_classification.files_to_keep);
}
files = files_next;
}
}
-/// A CompactionPlan specifies the parameters for a single, which may
-/// generate one or more new parquet files. It includes the target
-/// [`CompactionLevel`], the specific files that should be compacted
-/// together to form new file(s), files that should be upgraded
-/// without chainging, files that should be left unmodified.
-struct CompactionPlan {
- /// The target level of file resulting from compaction
- target_level: CompactionLevel,
- /// Files which should be compacted into a new single parquet
- /// file, often the small and/or overlapped files
- files_to_compact: Vec<ParquetFile>,
- /// Non-overlapped files that should be upgraded to the target
- /// level without rewriting (for example they are of sufficient
- /// size)
- files_to_upgrade: Vec<ParquetFile>,
- /// files which should not be modified. For example,
- /// non-overlapped or higher-target-level files
- files_to_keep: Vec<ParquetFile>,
-}
-
-/// Build [`CompactionPlan`] for a for a given set of files.
-///
-/// # Example:
-///
-/// . Input:
-/// |--L0.1--| |--L0.2--| |--L0.3--| |--L0.4--| --L0.5--|
-/// |--L1.1--| |--L1.2--| |--L1.3--| |--L1.4--|
-/// |---L2.1--|
-///
-/// .Output
-/// . target_level = 1
-/// . files_to_keep = [L2.1, L1.1, L1.4]
-/// . files_to_upgrade = [L0.1, L0.5]
-/// . files_to_compact = [L0.2, L0.3, L0.4, L1.2, L1.3]
-///
-fn build_compaction_plan(
- files: Vec<ParquetFile>,
- components: Arc<Components>,
-) -> Result<CompactionPlan, DynError> {
- let files_to_compact = files;
-
- // Detect target level to compact to
- let target_level = components.target_level_chooser.detect(&files_to_compact);
-
- // Split files into files_to_compact, files_to_upgrade, and files_to_keep
- //
- // Since output of one compaction is used as input of next compaction, all files that are not
- // compacted or upgraded are still kept to consider in next round of compaction
-
- // Split actual files to compact from its higher-target-level files
- // The higher-target-level files are kept for next round of compaction
- let (files_to_compact, mut files_to_keep) = components
- .target_level_split
- .apply(files_to_compact, target_level);
-
- // To have efficient compaction performance, we do not need to compact eligible non-overlapped files
- // Find eligible non-overlapped files and keep for next round of compaction
- let (files_to_compact, non_overlapping_files) = components
- .non_overlap_split
- .apply(files_to_compact, target_level);
- files_to_keep.extend(non_overlapping_files);
-
- // To have efficient compaction performance, we only need to uprade (catalog update only) eligible files
- let (files_to_compact, files_to_upgrade) = components
- .upgrade_split
- .apply(files_to_compact, target_level);
-
- info!(
- target_level = target_level.to_string(),
- files_to_compacts = files_to_compact.len(),
- files_to_upgrade = files_to_upgrade.len(),
- files_to_keep = files_to_keep.len(),
- "Compaction Plan"
- );
-
- Ok(CompactionPlan {
- target_level,
- files_to_compact,
- files_to_upgrade,
- files_to_keep,
- })
-}
-
/// Compact `files` into a new parquet file of the the given target_level
async fn run_compaction_plan(
files: &[ParquetFile],
diff --git a/compactor2/src/file_classification.rs b/compactor2/src/file_classification.rs
new file mode 100644
index 0000000000..9ba72687b8
--- /dev/null
+++ b/compactor2/src/file_classification.rs
@@ -0,0 +1,27 @@
+use data_types::{CompactionLevel, ParquetFile};
+
+/// A file classification specifies the parameters for a single compaction branch.
+///
+/// This may
+/// generate one or more new parquet files. It includes the target
+/// [`CompactionLevel`], the specific files that should be compacted
+/// together to form new file(s), files that should be upgraded
+/// without changing, and files that should be left unmodified.
+#[derive(Debug, PartialEq, Eq)]
+pub struct FileClassification {
+ /// The target level of file resulting from compaction
+ pub target_level: CompactionLevel,
+
+ /// Files which should be compacted into a new single parquet
+ /// file, often the small and/or overlapped files
+ pub files_to_compact: Vec<ParquetFile>,
+
+ /// Non-overlapped files that should be upgraded to the target
+ /// level without rewriting (for example they are of sufficient
+ /// size)
+ pub files_to_upgrade: Vec<ParquetFile>,
+
+ /// files which should not be modified. For example,
+ /// non-overlapped or higher-target-level files
+ pub files_to_keep: Vec<ParquetFile>,
+}
diff --git a/compactor2/src/lib.rs b/compactor2/src/lib.rs
index d795cb7c27..17fbc4f835 100644
--- a/compactor2/src/lib.rs
+++ b/compactor2/src/lib.rs
@@ -175,6 +175,7 @@ mod components;
pub mod config;
mod driver;
mod error;
+mod file_classification;
pub mod object_store;
mod partition_info;
mod plan_ir;
|
2a8731dd9041a697c48f4d45b402c3e9ebb5f85d
|
Dom Dwyer
|
2023-03-01 13:52:44
|
ingester overview documentation
|
Adds "overview" documentation for the ingester, including the high-level
purpose & design. Each subsystem is briefly documented, with links for
jumping-off points into more specific documentation.
| null |
docs: ingester overview documentation
Adds "overview" documentation for the ingester, including the high-level
purpose & design. Each subsystem is briefly documented, with links for
jumping-off points into more specific documentation.
|
diff --git a/ingester2/src/lib.rs b/ingester2/src/lib.rs
index 36b736f0a8..5e3f2749b8 100644
--- a/ingester2/src/lib.rs
+++ b/ingester2/src/lib.rs
@@ -1,12 +1,159 @@
//! IOx Ingester V2 implementation.
//!
+//! # Overview
+//!
+//! The purpose of the ingester is to receive RPC write requests, and batch them
+//! up until ready to be persisted. This buffered data must be durable (tolerant
+//! of a crash) and queryable via the Flight RPC query interface. Data is
+//! organised in a hierarchical structure (the [`BufferTree`]) to allow
+//! table-scoped queries, and partition-scoped persistence / parquet file
+//! generation.
+//!
+//! All buffered data is periodically persisted via a WAL file rotation, or
+//! selectively when a single partition has grown too large ("hot partition
+//! persistence").
+//!
+//! ```text
+//!
+//! ┌──────────────┐ ┌──────────────┐
+//! │ RPC Write │ │ RPC Query │
+//! └──────────────┘ └──────────────┘
+//! │ ▲
+//! ▼ │
+//! ┌──────────────┐ │
+//! │ WAL │ │
+//! └──────────────┘ │
+//! │ │
+//! │ ┌──────────────┐ │
+//! └──▶│ BufferTree │────┘
+//! └──────────────┘
+//! │
+//! ▼
+//! ┌──────────────┐
+//! │ Persist │
+//! └──────────────┘
+//! │
+//! ▼
+//!
+//! Object Storage
+//!
+//!
+//!
+//! (arrows show data flow)
+//!```
+//!
+//! The write path is composed of implementers of the [`DmlSink`]
+//! abstraction, and likewise queries stream out of the [`QueryExec`]
+//! abstraction.
+//!
+//!
+//! ## Subsystems
+//!
+//! The Ingester is composed of multiple smaller, distinct subsystems that
+//! communicate / work together to provide the full ingester functionality.
+//!
+//! Each subsystem has its own documentation further describing the behaviours
+//! and problems it solves in more detail.
+//!
+//!
+//! ### [`BufferTree`]
+//!
+//! Perhaps not a "system" as such, but a single instance of the [`BufferTree`]
+//! is the central point of the ingester - all writes are buffered in the tree,
+//! and all queries execute against it. Persist operations persist data in the
+//! buffer tree, removing it once complete.
+//!
+//! All other systems either directly or indirectly operate on, or control
+//! operations against the [`BufferTree`].
+//!
+//!
+//! ### RPC Server
+//!
+//! External services communicate with the ingester via gRPC service calls. All
+//! gRPC handlers can be found in the [`grpc`] modules.
+//!
+//! The two main RPC endpoints are:
+//!
+//! * Write: buffer new data in the ingester, issued by the router
+//! * Query: return buffered data scoped by {namespace, table}, issued by the
+//! queriers
+//!
+//! Both endpoints are latency sensitive, and the call durations are directly
+//! observable by end users.
+//!
+//! Writes commit to the [`wal`] for durability / crash tolerance and are
+//! buffered into the [`BufferTree`] in parallel. Queries execute against the
+//! [`BufferTree`], lazily streaming the results back to the queriers (lock
+//! acquisition and data copying is deferred until "pulled" by the querier).
+//!
+//! If the [`IngestState`] is marked as unhealthy/shutting down, then write
+//! requests are rejected with a "resource exhausted" error message until it
+//! becomes healthy again.
+//!
+//!
+//! ### Persist System
+//!
+//! The persist system is responsible for durably writing data to object
+//! storage; that includes compacting it (to remove duplicate/overwritten rows),
+//! generating a parquet file, uploading it to the configured object store, and
+//! inserting the necessary catalog state to make the new file queryable.
+//!
+//! Data is typically sourced from a [`BufferTree`], and once the new file is
+//! queryable, the data it contains is removed from the [`BufferTree`].
+//!
+//! Code that uses the persist system does so through the [`PersistQueue`]
+//! abstraction, implemented by the [`PersistHandle`].
+//!
+//! The persist system provides a logical queue of outstanding persist jobs, and
+//! a configurable number of worker tasks to execute them - see
+//! [`PersistHandle`] for detailed documentation. If the persist system is
+//! "saturated" (queue depth reached maximum) then further writes are rejected
+//! by setting the [`IngestState`] to [`IngestStateError::PersistSaturated`]
+//! until the queue depth is reduced.
+//!
+//! The persist system is driven mainly by WAL rotation (below), and interacts
+//! with the [`IngestState`] to provide back-pressure, indirectly controlling
+//! the memory utilisation of the Ingester (to prevent OOMs). Partitions with
+//! large volumes of writes (or otherwise problematic data) are prematurely
+//! persisted independently of the WAL rotation ("hot partition persistence").
+//!
+//!
+//! ### WAL
+//!
+//! The write-ahead log ([`wal`]) is used to durably record each operation
+//! against the [`BufferTree`] into a replay log, with a partial order. This
+//! happens synchronously in the hot write path, so WAL performance is a
+//! critical consideration.
+//!
+//! Once a write has been buffered in the [`BufferTree`] AND flushed to disk in
+//! the [`wal`], the request is ACKed to the user.
+//!
+//! If the ingester crashes / stops un-cleanly, then the WAL must be replayed to
+//! rebuild the in-memory state (the [`BufferTree`]) to prevent data loss. This
+//! has some quirks, discussed in "Write Reordering" below.
+//!
+//!
+//! #### WAL Rotation
+//!
+//! The WAL file is periodically rotated at a configurable interval, which
+//! triggers a full persistence of all buffered data in the ingester at the same
+//! time. This "full persist" indirectly affects the ingester in many ways:
+//!
+//! * Limits the size of a single WAL file
+//! * Limits the amount of buffered data, which in turn
+//! * Limits the amount of WAL data to replay after a crash
+//! * Limits the amount of data a querier must read & dedupe per query
+//! * Limits the largest source of memory utilisation in the ingester
+//! * Limits the amount of data in a partition that must be persisted
+//!
+//!
//! ## Write Reordering
//!
//! A write that enters an `ingester2` instance can be reordered arbitrarily
//! with concurrent write requests.
//!
-//! For example, two gRPC writes can race to be committed to the WAL, and then
-//! race again to be buffered into the [`BufferTree`]. Writes to a
+//! For example, two gRPC writes can race to be committed to the [`wal`], and
+//! then race again to be buffered into the [`BufferTree`]. Writes to a
//! [`BufferTree`] may arrive out-of-order w.r.t their assigned sequence
//! numbers.
//!
@@ -29,6 +176,14 @@
//!
//! [`BufferTree`]: crate::buffer_tree::BufferTree
//! [`SequenceNumber`]: data_types::SequenceNumber
+//! [`PersistQueue`]: crate::persist::queue::PersistQueue
+//! [`PersistHandle`]: crate::persist::handle::PersistHandle
+//! [`IngestState`]: crate::ingest_state::IngestState
+//! [`grpc`]: crate::server::grpc
+//! [`DmlSink`]: crate::dml_sink::DmlSink
+//! [`QueryExec`]: crate::query::QueryExec
+//! [`IngestStateError::PersistSaturated`]:
+//! crate::ingest_state::IngestStateError
#![allow(dead_code)] // Until ingester2 is used.
#![deny(rustdoc::broken_intra_doc_links, rust_2018_idioms)]
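The write-reordering property documented above can be illustrated outside the ingester with a toy program (a sketch only; the mutex-guarded `Vec` stands in for the `BufferTree`, and nothing here is ingester API):

```rust
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    // Two "writes" with assigned sequence numbers race to be applied; the
    // buffer may observe them out of sequence-number order, just as two gRPC
    // writes can race into the WAL and then into the BufferTree.
    let buffer = Arc::new(Mutex::new(Vec::new()));

    let handles: Vec<_> = [(1_u64, "write A"), (2_u64, "write B")]
        .into_iter()
        .map(|op| {
            let buffer = Arc::clone(&buffer);
            thread::spawn(move || buffer.lock().unwrap().push(op))
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }

    // Prints either [(1, "write A"), (2, "write B")] or the reverse.
    println!("{:?}", buffer.lock().unwrap());
}
```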
|
361a82a84aa80c7e01cdc77f8e4b67cd01bf2622
|
Jamie Strandboge
|
2023-10-26 10:53:42
|
ignore Go in .github/dependabot.yml (#24430)
|
Before switching to rust-based IOx, influxdb was a Go project which
dependabot tracked. After the switch, dependabot would issue alerts for
go files that no longer exist. Tell dependabot to ignore "gomod"
| null |
chore: ignore Go in .github/dependabot.yml (#24430)
Before switching to rust-based IOx, influxdb was a Go project which
dependabot tracked. After the switch, dependabot would issue alerts for
go files that no longer exist. Tell dependabot to ignore "gomod"
|
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 7fd92c20cc..284c1bfa79 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -20,3 +20,8 @@ updates:
- dependency-name: "parquet"
- dependency-name: "datafusion"
- dependency-name: "datafusion-*"
+# Before switching to rust-based IOx, influxdb was a Go project which
+# dependabot tracked. After the switch, dependabot would issue alerts for go
+# files that no longer exist. Tell dependabot to ignore "gomod"
+ignore:
+ - package-ecosystem: "gomod"
|
e3b802cd25f37d50e41fe8aedf43ed48d00d7838
|
Marco Neumann
|
2023-03-31 10:01:33
|
"parquet sortness" optimizer pass (#7383)
|
* feat: "parquet sortness" optimizer pass
Trade wider fan-out for not having to fully sort parquet files.
For #6098.
* test: rename
Co-authored-by: Andrew Lamb <[email protected]>
---------
|
Co-authored-by: Andrew Lamb <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
feat: "parquet sortness" optimizer pass (#7383)
* feat: "parquet sortness" optimizer pass
Trade wider fan-out for not having to fully sort parquet files.
For #6098.
* test: rename
Co-authored-by: Andrew Lamb <[email protected]>
---------
Co-authored-by: Andrew Lamb <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/iox_query/src/config.rs b/iox_query/src/config.rs
index 82d2299a5c..2471ac96b3 100644
--- a/iox_query/src/config.rs
+++ b/iox_query/src/config.rs
@@ -22,6 +22,22 @@ extensions_options! {
///
/// This protects against certain highly degenerative plans.
pub max_dedup_time_split: usize, default = 100
+
+ /// When multiple parquet files are required in a sorted way (e.g. for de-duplication), we have two options:
+ ///
+ /// 1. **In-mem sorting:** Put them into [`target_partitions`] DataFusion partitions. This limits the fan-out,
+ /// but requires that we potentially chain multiple parquet files into a single DataFusion partition. Since
+ /// chaining sorted data does NOT automatically result in sorted data (e.g. AB-AB is not sorted), we need to
+ /// perform an in-memory sort using [`SortExec`] afterwards. This is expensive.
+ /// 2. **Fan-out:** Instead of chaining files within DataFusion partitions, we can accept a fan-out beyond
+ /// [`target_partitions`]. This prevents in-memory sorting but may result in OOMs (out-of-memory).
+ ///
+ /// We try to pick option 2 up to a certain number of files, which is configured by this setting.
+ ///
+ ///
+ /// [`SortExec`]: datafusion::physical_plan::sorts::sort::SortExec
+ /// [`target_partitions`]: datafusion::common::config::ExecutionOptions::target_partitions
+ pub max_parquet_fanout: usize, default = 40
}
}
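To make the new option concrete: with `target_partitions = 4` and six sorted files, option 1 chains files inside four partitions and must re-sort in memory, while option 2 accepts six single-file partitions. Below is a hedged sketch of installing a custom cap, mirroring the pattern the new tests use (the `iox_query::config::IoxConfigExt` import path is assumed):

```rust
use datafusion::config::ConfigOptions;
use iox_query::config::IoxConfigExt; // assumed path to the extension defined above

fn session_config_with_fanout_cap() -> ConfigOptions {
    // Install the IOx extension with a custom fan-out cap; every other knob
    // keeps its default (`max_parquet_fanout` itself defaults to 40).
    let mut config = ConfigOptions::default();
    config.extensions.insert(IoxConfigExt {
        max_parquet_fanout: 100,
        ..Default::default()
    });
    config
}
```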
diff --git a/iox_query/src/physical_optimizer/sort/mod.rs b/iox_query/src/physical_optimizer/sort/mod.rs
index acc8642a12..be610d51d3 100644
--- a/iox_query/src/physical_optimizer/sort/mod.rs
+++ b/iox_query/src/physical_optimizer/sort/mod.rs
@@ -2,5 +2,6 @@
//!
//! [`SortExec`]: datafusion::physical_plan::sorts::sort::SortExec
+pub mod parquet_sortness;
pub mod redundant_sort;
pub mod sort_pushdown;
diff --git a/iox_query/src/physical_optimizer/sort/parquet_sortness.rs b/iox_query/src/physical_optimizer/sort/parquet_sortness.rs
new file mode 100644
index 0000000000..66f96b5f14
--- /dev/null
+++ b/iox_query/src/physical_optimizer/sort/parquet_sortness.rs
@@ -0,0 +1,448 @@
+use std::sync::Arc;
+
+use datafusion::{
+ common::tree_node::{Transformed, TreeNode},
+ config::ConfigOptions,
+ error::Result,
+ physical_optimizer::PhysicalOptimizerRule,
+ physical_plan::{
+ file_format::{FileScanConfig, ParquetExec},
+ sorts::sort::SortExec,
+ ExecutionPlan,
+ },
+};
+use observability_deps::tracing::warn;
+
+use crate::config::IoxConfigExt;
+
+/// Trade a wider fan-out for not having to sort parquet files.
+///
+/// This will fan out [`ParquetExec`] nodes beyond [`target_partitions`] if they sit under a [`SortExec`]. You should
+/// likely run [`RedundantSort`] afterwards to eliminate the [`SortExec`].
+///
+///
+/// [`RedundantSort`]: super::redundant_sort::RedundantSort
+/// [`target_partitions`]: datafusion::common::config::ExecutionOptions::target_partitions
+#[derive(Debug, Default)]
+pub struct ParquetSortness;
+
+impl PhysicalOptimizerRule for ParquetSortness {
+ fn optimize(
+ &self,
+ plan: Arc<dyn ExecutionPlan>,
+ config: &ConfigOptions,
+ ) -> Result<Arc<dyn ExecutionPlan>> {
+ plan.transform_down(&|plan| {
+ let Some(sort_exec) = plan.as_any().downcast_ref::<SortExec>() else {
+ return Ok(Transformed::No(plan));
+ };
+
+ let transformed_child = Arc::clone(sort_exec.input()).transform_down(&|plan| {
+ let Some(parquet_exec) = plan.as_any().downcast_ref::<ParquetExec>() else {
+ return Ok(Transformed::No(plan));
+ };
+
+ let base_config = parquet_exec.base_config();
+ if base_config.output_ordering.is_none() {
+ // no output ordering requested
+ return Ok(Transformed::No(plan));
+ }
+
+ if base_config.file_groups.iter().all(|g| g.len() < 2) {
+ // already flat
+ return Ok(Transformed::No(plan));
+ }
+
+ // Protect against degenerative plans
+ let n_files = base_config.file_groups.iter().map(Vec::len).sum::<usize>();
+ let max_parquet_fanout = config
+ .extensions
+ .get::<IoxConfigExt>()
+ .cloned()
+ .unwrap_or_default()
+ .max_parquet_fanout;
+ if n_files > max_parquet_fanout {
+ warn!(
+ n_files,
+ max_parquet_fanout, "cannot use pre-sorted parquet files, fan-out too wide"
+ );
+ return Ok(Transformed::No(plan));
+ }
+
+ let base_config = FileScanConfig {
+ file_groups: base_config
+ .file_groups
+ .iter()
+ .flat_map(|g| g.iter())
+ .map(|f| vec![f.clone()])
+ .collect(),
+ ..base_config.clone()
+ };
+ let new_parquet_exec =
+ ParquetExec::new(base_config, parquet_exec.predicate().cloned(), None);
+ Ok(Transformed::Yes(Arc::new(new_parquet_exec)))
+ })?;
+
+ if transformed_child.output_ordering() == Some(sort_exec.expr()) {
+ Ok(Transformed::Yes(
+ plan.with_new_children(vec![transformed_child])?,
+ ))
+ } else {
+ Ok(Transformed::No(plan))
+ }
+ })
+ }
+
+ fn name(&self) -> &str {
+ "parquet_sortness"
+ }
+
+ fn schema_check(&self) -> bool {
+ true
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use arrow::datatypes::{DataType, Field, Schema, SchemaRef};
+ use datafusion::{
+ datasource::{listing::PartitionedFile, object_store::ObjectStoreUrl},
+ physical_expr::PhysicalSortExpr,
+ physical_plan::{empty::EmptyExec, expressions::Column, Statistics},
+ };
+ use object_store::{path::Path, ObjectMeta};
+
+ use crate::physical_optimizer::test_util::{assert_unknown_partitioning, OptimizationTest};
+
+ use super::*;
+
+ #[test]
+ fn test_happy_path() {
+ let schema = schema();
+ let base_config = FileScanConfig {
+ object_store_url: ObjectStoreUrl::parse("test://").unwrap(),
+ file_schema: Arc::clone(&schema),
+ file_groups: vec![vec![file(1), file(2)]],
+ statistics: Statistics::default(),
+ projection: None,
+ limit: None,
+ table_partition_cols: vec![],
+ output_ordering: Some(ordering(["col2", "col1"], &schema)),
+ infinite_source: false,
+ };
+ let inner = ParquetExec::new(base_config, None, None);
+ let plan = Arc::new(
+ SortExec::try_new(
+ ordering(["col2", "col1"], &schema),
+ Arc::new(inner),
+ Some(42),
+ )
+ .unwrap(),
+ );
+ let opt = ParquetSortness::default();
+ insta::assert_yaml_snapshot!(
+ OptimizationTest::new(plan, opt),
+ @r###"
+ ---
+ input:
+ - " SortExec: fetch=42, expr=[col2@1 ASC,col1@0 ASC]"
+ - " ParquetExec: limit=None, partitions={1 group: [[1.parquet, 2.parquet]]}, projection=[col1, col2, col3]"
+ output:
+ Ok:
+ - " SortExec: fetch=42, expr=[col2@1 ASC,col1@0 ASC]"
+ - " ParquetExec: limit=None, partitions={2 groups: [[1.parquet], [2.parquet]]}, output_ordering=[col2@1 ASC, col1@0 ASC], projection=[col1, col2, col3]"
+ "###
+ );
+ }
+
+ #[test]
+ fn test_sort_partitioning() {
+ let schema = schema();
+ let base_config = FileScanConfig {
+ object_store_url: ObjectStoreUrl::parse("test://").unwrap(),
+ file_schema: Arc::clone(&schema),
+ file_groups: vec![vec![file(1), file(2)], vec![file(3)]],
+ statistics: Statistics::default(),
+ projection: None,
+ limit: None,
+ table_partition_cols: vec![],
+ output_ordering: Some(ordering(["col2", "col1"], &schema)),
+ infinite_source: false,
+ };
+ let inner = ParquetExec::new(base_config, None, None);
+ let plan = Arc::new(SortExec::new_with_partitioning(
+ ordering(["col2", "col1"], &schema),
+ Arc::new(inner),
+ true,
+ Some(42),
+ ));
+
+ assert_unknown_partitioning(plan.output_partitioning(), 2);
+
+ let opt = ParquetSortness::default();
+ let test = OptimizationTest::new(plan, opt);
+ insta::assert_yaml_snapshot!(
+ test,
+ @r###"
+ ---
+ input:
+ - " SortExec: fetch=42, expr=[col2@1 ASC,col1@0 ASC]"
+ - " ParquetExec: limit=None, partitions={2 groups: [[1.parquet, 2.parquet], [3.parquet]]}, projection=[col1, col2, col3]"
+ output:
+ Ok:
+ - " SortExec: fetch=42, expr=[col2@1 ASC,col1@0 ASC]"
+ - " ParquetExec: limit=None, partitions={3 groups: [[1.parquet], [2.parquet], [3.parquet]]}, output_ordering=[col2@1 ASC, col1@0 ASC], projection=[col1, col2, col3]"
+ "###
+ );
+
+ assert_unknown_partitioning(test.output_plan().unwrap().output_partitioning(), 3);
+ }
+
+ #[test]
+ fn test_parquet_already_flat() {
+ let schema = schema();
+ let base_config = FileScanConfig {
+ object_store_url: ObjectStoreUrl::parse("test://").unwrap(),
+ file_schema: Arc::clone(&schema),
+ file_groups: vec![vec![file(1)], vec![file(2)]],
+ statistics: Statistics::default(),
+ projection: None,
+ limit: None,
+ table_partition_cols: vec![],
+ output_ordering: Some(ordering(["col2", "col1"], &schema)),
+ infinite_source: false,
+ };
+ let inner = ParquetExec::new(base_config, None, None);
+ let plan = Arc::new(
+ SortExec::try_new(
+ ordering(["col2", "col1"], &schema),
+ Arc::new(inner),
+ Some(42),
+ )
+ .unwrap(),
+ );
+ let opt = ParquetSortness::default();
+ insta::assert_yaml_snapshot!(
+ OptimizationTest::new(plan, opt),
+ @r###"
+ ---
+ input:
+ - " SortExec: fetch=42, expr=[col2@1 ASC,col1@0 ASC]"
+ - " ParquetExec: limit=None, partitions={2 groups: [[1.parquet], [2.parquet]]}, output_ordering=[col2@1 ASC, col1@0 ASC], projection=[col1, col2, col3]"
+ output:
+ Ok:
+ - " SortExec: fetch=42, expr=[col2@1 ASC,col1@0 ASC]"
+ - " ParquetExec: limit=None, partitions={2 groups: [[1.parquet], [2.parquet]]}, output_ordering=[col2@1 ASC, col1@0 ASC], projection=[col1, col2, col3]"
+ "###
+ );
+ }
+
+ #[test]
+ fn test_parquet_has_different_ordering() {
+ let schema = schema();
+ let base_config = FileScanConfig {
+ object_store_url: ObjectStoreUrl::parse("test://").unwrap(),
+ file_schema: Arc::clone(&schema),
+ file_groups: vec![vec![file(1), file(2)]],
+ statistics: Statistics::default(),
+ projection: None,
+ limit: None,
+ table_partition_cols: vec![],
+ output_ordering: Some(ordering(["col1", "col2"], &schema)),
+ infinite_source: false,
+ };
+ let inner = ParquetExec::new(base_config, None, None);
+ let plan = Arc::new(
+ SortExec::try_new(
+ ordering(["col2", "col1"], &schema),
+ Arc::new(inner),
+ Some(42),
+ )
+ .unwrap(),
+ );
+ let opt = ParquetSortness::default();
+ insta::assert_yaml_snapshot!(
+ OptimizationTest::new(plan, opt),
+ @r###"
+ ---
+ input:
+ - " SortExec: fetch=42, expr=[col2@1 ASC,col1@0 ASC]"
+ - " ParquetExec: limit=None, partitions={1 group: [[1.parquet, 2.parquet]]}, projection=[col1, col2, col3]"
+ output:
+ Ok:
+ - " SortExec: fetch=42, expr=[col2@1 ASC,col1@0 ASC]"
+ - " ParquetExec: limit=None, partitions={1 group: [[1.parquet, 2.parquet]]}, projection=[col1, col2, col3]"
+ "###
+ );
+ }
+
+ #[test]
+ fn test_parquet_has_no_ordering() {
+ let schema = schema();
+ let base_config = FileScanConfig {
+ object_store_url: ObjectStoreUrl::parse("test://").unwrap(),
+ file_schema: Arc::clone(&schema),
+ file_groups: vec![vec![file(1), file(2)]],
+ statistics: Statistics::default(),
+ projection: None,
+ limit: None,
+ table_partition_cols: vec![],
+ output_ordering: None,
+ infinite_source: false,
+ };
+ let inner = ParquetExec::new(base_config, None, None);
+ let plan = Arc::new(
+ SortExec::try_new(
+ ordering(["col2", "col1"], &schema),
+ Arc::new(inner),
+ Some(42),
+ )
+ .unwrap(),
+ );
+ let opt = ParquetSortness::default();
+ insta::assert_yaml_snapshot!(
+ OptimizationTest::new(plan, opt),
+ @r###"
+ ---
+ input:
+ - " SortExec: fetch=42, expr=[col2@1 ASC,col1@0 ASC]"
+ - " ParquetExec: limit=None, partitions={1 group: [[1.parquet, 2.parquet]]}, projection=[col1, col2, col3]"
+ output:
+ Ok:
+ - " SortExec: fetch=42, expr=[col2@1 ASC,col1@0 ASC]"
+ - " ParquetExec: limit=None, partitions={1 group: [[1.parquet, 2.parquet]]}, projection=[col1, col2, col3]"
+ "###
+ );
+ }
+
+ #[test]
+ fn test_fanout_limit() {
+ let schema = schema();
+ let base_config = FileScanConfig {
+ object_store_url: ObjectStoreUrl::parse("test://").unwrap(),
+ file_schema: Arc::clone(&schema),
+ file_groups: vec![vec![file(1), file(2), file(3)]],
+ statistics: Statistics::default(),
+ projection: None,
+ limit: None,
+ table_partition_cols: vec![],
+ output_ordering: Some(ordering(["col2", "col1"], &schema)),
+ infinite_source: false,
+ };
+ let inner = ParquetExec::new(base_config, None, None);
+ let plan = Arc::new(
+ SortExec::try_new(
+ ordering(["col2", "col1"], &schema),
+ Arc::new(inner),
+ Some(42),
+ )
+ .unwrap(),
+ );
+ let opt = ParquetSortness::default();
+ let mut config = ConfigOptions::default();
+ config.extensions.insert(IoxConfigExt {
+ max_parquet_fanout: 2,
+ ..Default::default()
+ });
+ insta::assert_yaml_snapshot!(
+ OptimizationTest::new_with_config(plan, opt, &config),
+ @r###"
+ ---
+ input:
+ - " SortExec: fetch=42, expr=[col2@1 ASC,col1@0 ASC]"
+ - " ParquetExec: limit=None, partitions={1 group: [[1.parquet, 2.parquet, 3.parquet]]}, projection=[col1, col2, col3]"
+ output:
+ Ok:
+ - " SortExec: fetch=42, expr=[col2@1 ASC,col1@0 ASC]"
+ - " ParquetExec: limit=None, partitions={1 group: [[1.parquet, 2.parquet, 3.parquet]]}, projection=[col1, col2, col3]"
+ "###
+ );
+ }
+
+ #[test]
+ fn test_other_node() {
+ let schema = schema();
+ let inner = EmptyExec::new(true, Arc::clone(&schema));
+ let plan = Arc::new(
+ SortExec::try_new(
+ ordering(["col2", "col1"], &schema),
+ Arc::new(inner),
+ Some(42),
+ )
+ .unwrap(),
+ );
+ let opt = ParquetSortness::default();
+ insta::assert_yaml_snapshot!(
+ OptimizationTest::new(plan, opt),
+ @r###"
+ ---
+ input:
+ - " SortExec: fetch=42, expr=[col2@1 ASC,col1@0 ASC]"
+ - " EmptyExec: produce_one_row=true"
+ output:
+ Ok:
+ - " SortExec: fetch=42, expr=[col2@1 ASC,col1@0 ASC]"
+ - " EmptyExec: produce_one_row=true"
+ "###
+ );
+ }
+
+ #[test]
+ fn test_does_not_touch_freestanding_parquet_exec() {
+ let schema = schema();
+ let base_config = FileScanConfig {
+ object_store_url: ObjectStoreUrl::parse("test://").unwrap(),
+ file_schema: Arc::clone(&schema),
+ file_groups: vec![vec![file(1), file(2)]],
+ statistics: Statistics::default(),
+ projection: None,
+ limit: None,
+ table_partition_cols: vec![],
+ output_ordering: Some(ordering(["col2", "col1"], &schema)),
+ infinite_source: false,
+ };
+ let plan = Arc::new(ParquetExec::new(base_config, None, None));
+ let opt = ParquetSortness::default();
+ insta::assert_yaml_snapshot!(
+ OptimizationTest::new(plan, opt),
+ @r###"
+ ---
+ input:
+ - " ParquetExec: limit=None, partitions={1 group: [[1.parquet, 2.parquet]]}, projection=[col1, col2, col3]"
+ output:
+ Ok:
+ - " ParquetExec: limit=None, partitions={1 group: [[1.parquet, 2.parquet]]}, projection=[col1, col2, col3]"
+ "###
+ );
+ }
+
+ fn schema() -> SchemaRef {
+ Arc::new(Schema::new(vec![
+ Field::new("col1", DataType::Utf8, true),
+ Field::new("col2", DataType::Utf8, true),
+ Field::new("col3", DataType::Utf8, true),
+ ]))
+ }
+
+ fn file(n: u128) -> PartitionedFile {
+ PartitionedFile {
+ object_meta: ObjectMeta {
+ location: Path::parse(format!("{n}.parquet")).unwrap(),
+ last_modified: Default::default(),
+ size: 0,
+ },
+ partition_values: vec![],
+ range: None,
+ extensions: None,
+ }
+ }
+
+ fn ordering<const N: usize>(cols: [&str; N], schema: &SchemaRef) -> Vec<PhysicalSortExpr> {
+ cols.into_iter()
+ .map(|col| PhysicalSortExpr {
+ expr: Arc::new(Column::new_with_schema(col, schema.as_ref()).unwrap()),
+ options: Default::default(),
+ })
+ .collect()
+ }
+}
|
82e57ac76a60c13e0f9a0d218e1181db35dd2c9f
|
Paul Dix
|
2022-12-13 16:35:23
|
make data generator handle failed requests (#6397)
|
Updates the data generator to handle failed requests. Adds some println output to show progress along the way.
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
feat: make data generator handle failed requests (#6397)
Updates the data generator to handle failed requests. Adds some println output to show progress along the way.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/iox_data_generator/benches/point_generation.rs b/iox_data_generator/benches/point_generation.rs
index c704f97360..e29af90530 100644
--- a/iox_data_generator/benches/point_generation.rs
+++ b/iox_data_generator/benches/point_generation.rs
@@ -192,6 +192,7 @@ agents = [{name = "foo", sampling_interval = "1s", count = 3}]
let expected_points = 30000;
let counter = Arc::new(AtomicU64::new(0));
+ let request_counter = Arc::new(AtomicU64::new(0));
let mut group = c.benchmark_group("agent_pre_generated");
group.measurement_time(std::time::Duration::from_secs(50));
group.throughput(Throughput::Elements(expected_points));
@@ -201,9 +202,14 @@ agents = [{name = "foo", sampling_interval = "1s", count = 3}]
agent.reset_current_date_time(0);
let points_writer =
Arc::new(points_writer.build_for_agent("foo", "foo", "foo").unwrap());
- let r = block_on(agent.generate_all(points_writer, 1, Arc::clone(&counter)));
+ let r = block_on(agent.generate_all(
+ points_writer,
+ 1,
+ Arc::clone(&counter),
+ Arc::clone(&request_counter),
+ ));
let n_points = r.expect("Could not generate data");
- assert_eq!(n_points, expected_points as usize);
+ assert_eq!(n_points.row_count, expected_points as usize);
})
});
}
diff --git a/iox_data_generator/src/agent.rs b/iox_data_generator/src/agent.rs
index caaa65d8bc..efcfdf7cea 100644
--- a/iox_data_generator/src/agent.rs
+++ b/iox_data_generator/src/agent.rs
@@ -15,7 +15,7 @@ use std::sync::{
Arc,
};
use std::time::{Duration, Instant};
-use tracing::{debug, info};
+use tracing::debug;
/// Agent-specific Results
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -71,6 +71,46 @@ pub struct Agent {
interval: Option<tokio::time::Interval>,
}
+/// Basic stats for agents generating requests
+#[derive(Debug, Default, Copy, Clone)]
+pub struct AgentGenerateStats {
+ /// number of rows the agent has written
+ pub row_count: usize,
+ /// number of requests the agent has made
+ pub request_count: usize,
+ /// number of errors
+ pub error_count: usize,
+}
+
+impl AgentGenerateStats {
+ /// Display output for agent writing stats
+ pub fn display_stats(&self, elapsed_time: Duration) -> String {
+ if elapsed_time.as_secs() == 0 {
+ format!(
+ "made {} requests with {} rows in {:?} with {} errors for a {:.2} error rate",
+ self.request_count,
+ self.row_count,
+ elapsed_time,
+ self.error_count,
+ self.error_rate()
+ )
+ } else {
+ let req_secs = elapsed_time.as_secs();
+ let rows_per_sec = self.row_count as u64 / req_secs;
+ let reqs_per_sec = self.request_count as u64 / req_secs;
+ format!("made {} requests at {}/sec with {} rows at {}/sec in {:?} with {} errors for a {:.2} error rate",
+ self.request_count, reqs_per_sec, self.row_count, rows_per_sec, elapsed_time, self.error_count, self.error_rate())
+ }
+ }
+
+ fn error_rate(&self) -> f64 {
+ if self.error_count == 0 {
+ return 0.0;
+ }
+ self.error_count as f64 / self.request_count as f64 * 100.0
+ }
+}
+
impl Agent {
/// Create agents that will generate data points according to these
/// specs.
@@ -137,10 +177,11 @@ impl Agent {
points_writer: Arc<PointsWriter>,
batch_size: usize,
counter: Arc<AtomicU64>,
- ) -> Result<usize> {
+ request_counter: Arc<AtomicU64>,
+ ) -> Result<AgentGenerateStats> {
let mut points_this_batch = 1;
- let mut total_points = 0;
let start = Instant::now();
+ let mut stats = AgentGenerateStats::default();
while points_this_batch != 0 {
let batch_start = Instant::now();
@@ -158,35 +199,55 @@ impl Agent {
for s in &streams {
points_this_batch += s.line_count();
- total_points += s.line_count();
}
if points_this_batch == 0 && self.finished {
break;
}
- points_writer
+ stats.request_count += 1;
+ match points_writer
.write_points(streams.into_iter().flatten())
.await
- .context(CouldNotWritePointsSnafu)?;
-
- info!("wrote {} in {:?}", points_this_batch, batch_start.elapsed());
- let total = counter.fetch_add(points_this_batch as u64, Ordering::SeqCst);
- let secs = start.elapsed().as_secs();
- if secs != 0 {
- info!(
- "Agent {} written {} in {:?} for {}/sec. Aggregate {} in {}/sec",
- self.id,
- total_points,
- start.elapsed(),
- total_points / secs as usize,
- total,
- total / secs,
- )
+ .context(CouldNotWritePointsSnafu)
+ {
+ Ok(_) => {
+ stats.row_count += points_this_batch;
+
+ if stats.request_count % 10 == 0 {
+ println!(
+ "Agent {} wrote {} in {:?}",
+ self.id,
+ points_this_batch,
+ batch_start.elapsed()
+ );
+ }
+
+ // output something on the aggregate stats every 100 requests across all agents
+ let total_rows = counter.fetch_add(points_this_batch as u64, Ordering::SeqCst);
+ let total_requests = request_counter.fetch_add(1, Ordering::SeqCst);
+
+ if total_requests % 100 == 0 {
+ let secs = start.elapsed().as_secs();
+ if secs != 0 {
+ println!(
+ "{} rows written in {} requests for {} rows/sec and {} reqs/sec",
+ total_rows,
+ total_requests,
+ total_rows / secs,
+ total_requests / secs,
+ )
+ }
+ }
+ }
+ Err(e) => {
+ eprintln!("Error writing points: {}", e);
+ stats.error_count += 1;
+ }
}
}
- Ok(total_points)
+ Ok(stats)
}
/// Generate data points from the configuration in this agent.
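To illustrate the stats type added above (a sketch, not part of the commit): 100 requests carrying 30,000 rows over 10 seconds with 5 failures works out to 10 reqs/sec, 3,000 rows/sec, and a 5.00 error rate.

```rust
use std::time::Duration;
use iox_data_generator::agent::AgentGenerateStats; // path taken from the diff above

fn print_example_stats() {
    let stats = AgentGenerateStats {
        row_count: 30_000,
        request_count: 100,
        error_count: 5,
    };
    // error_rate = 5 / 100 * 100.0 = 5.00; 30_000 rows / 10 s = 3_000 rows/sec
    println!("{}", stats.display_stats(Duration::from_secs(10)));
}
```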
diff --git a/iox_data_generator/src/bin/iox_data_generator.rs b/iox_data_generator/src/bin/iox_data_generator.rs
index 4eb0e3cbe0..e60a773aef 100644
--- a/iox_data_generator/src/bin/iox_data_generator.rs
+++ b/iox_data_generator/src/bin/iox_data_generator.rs
@@ -214,7 +214,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
eprintln!("Submitted {} total points", total_points);
}
}
- Err(e) => panic!("Execution failed: \n{}", e),
+ Err(e) => eprintln!("Execution failed: \n{}", e),
}
Ok(())
diff --git a/iox_data_generator/src/lib.rs b/iox_data_generator/src/lib.rs
index a94dda4b44..7874437fc0 100644
--- a/iox_data_generator/src/lib.rs
+++ b/iox_data_generator/src/lib.rs
@@ -29,14 +29,16 @@
clippy::dbg_macro
)]
-use crate::{agent::Agent, tag_set::GeneratedTagSets};
+use crate::{
+ agent::{Agent, AgentGenerateStats},
+ tag_set::GeneratedTagSets,
+};
use snafu::{ResultExt, Snafu};
use std::{
convert::TryFrom,
sync::{atomic::AtomicU64, Arc},
time::{SystemTime, UNIX_EPOCH},
};
-use tracing::info;
pub mod agent;
pub mod field;
@@ -136,6 +138,7 @@ pub async fn generate(
let start = std::time::Instant::now();
let total_rows = Arc::new(AtomicU64::new(0));
+ let total_requests = Arc::new(AtomicU64::new(0));
for database_assignments in &database_agents {
let (org, bucket) = org_and_bucket_from_database(database_assignments.database);
@@ -174,17 +177,28 @@ pub async fn generate(
let agent_points_writer = Arc::clone(&agent_points_writer);
let total_rows = Arc::clone(&total_rows);
+ let total_requests = Arc::clone(&total_requests);
handles.push(tokio::task::spawn(async move {
// did this weird hack because otherwise the stdout outputs would be jumbled
// together garbage
if one_agent_at_a_time {
let _l = lock_ref.lock().await;
agent
- .generate_all(agent_points_writer, batch_size, total_rows)
+ .generate_all(
+ agent_points_writer,
+ batch_size,
+ total_rows,
+ total_requests,
+ )
.await
} else {
agent
- .generate_all(agent_points_writer, batch_size, total_rows)
+ .generate_all(
+ agent_points_writer,
+ batch_size,
+ total_rows,
+ total_requests,
+ )
.await
}
}));
@@ -192,27 +206,28 @@ pub async fn generate(
}
}
- let mut total_points = 0;
+ let mut stats = vec![];
for handle in handles {
- total_points += handle
- .await
- .context(TokioSnafu)?
- .context(AgentCouldNotGeneratePointsSnafu)?;
+ stats.push(
+ handle
+ .await
+ .context(TokioSnafu)?
+ .context(AgentCouldNotGeneratePointsSnafu)?,
+ );
}
+ let stats = stats
+ .into_iter()
+ .fold(AgentGenerateStats::default(), |totals, res| {
+ AgentGenerateStats {
+ request_count: totals.request_count + res.request_count,
+ error_count: totals.error_count + res.error_count,
+ row_count: totals.row_count + res.row_count,
+ }
+ });
- let elapsed = start.elapsed();
-
- let points_sec = if elapsed.as_secs() == 0 {
- 0
- } else {
- total_points as u64 / elapsed.as_secs()
- };
- info!(
- "wrote {} total points in {:?} for a rate of {}/sec",
- total_points, elapsed, points_sec
- );
+ println!("{}", stats.display_stats(start.elapsed()));
- Ok(total_points)
+ Ok(stats.row_count)
}
/// Gets the current time in nanoseconds since the epoch
|
6a4fa11dba1c7c539c3202ca87208b31ba5f0ba9
|
Andrew Lamb
|
2023-01-18 21:21:10
|
Update to latest datafusion / arrow `31.0.0` (#6618)
|
* chore: Update datafusion and arrow/parquet/arrow-flight `31.0.0`
* chore: Update for new API
* chore: Run cargo hakari tasks
|
Co-authored-by: CircleCI[bot] <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
chore: Update to latest datafusion / arrow `31.0.0` (#6618)
* chore: Update datafusion and arrow/parquet/arrow-flight `31.0.0`
* chore: Update for new API
* chore: Run cargo hakari tasks
Co-authored-by: CircleCI[bot] <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/Cargo.lock b/Cargo.lock
index ca9a363131..dedf47119c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -100,9 +100,9 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
[[package]]
name = "arrow"
-version = "30.0.1"
+version = "31.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1948f504d736dc6f71ea33773c5c7475998c44925be5321e9d18087a626845f5"
+checksum = "1b556d39f9d19e363833a0fe65d591cd0e2ecc0977589a78179b592bea8dc945"
dependencies = [
"ahash 0.8.2",
"arrow-arith",
@@ -123,9 +123,9 @@ dependencies = [
[[package]]
name = "arrow-arith"
-version = "30.0.1"
+version = "31.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5984187a7913813ffd5bb034fdc6810bdbe0ae4cff2292f0eb92797342dc02c8"
+checksum = "85c61b9235694b48f60d89e0e8d6cb478f39c65dd14b0fe1c3f04379b7d50068"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -138,9 +138,9 @@ dependencies = [
[[package]]
name = "arrow-array"
-version = "30.0.1"
+version = "31.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf71dc342bb42343d331b58c0bcad095dc045e367493d47b7f4c4509e2adfee5"
+checksum = "a1e6e839764618a911cc460a58ebee5ad3d42bc12d9a5e96a29b7cc296303aa1"
dependencies = [
"ahash 0.8.2",
"arrow-buffer",
@@ -154,9 +154,9 @@ dependencies = [
[[package]]
name = "arrow-buffer"
-version = "30.0.1"
+version = "31.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7b328d9f3e124cca761ec85a6d3fcea9bf8de1b8531c7a3b6abd367472024df"
+checksum = "03a21d232b1bc1190a3fdd2f9c1e39b7cd41235e95a0d44dd4f522bc5f495748"
dependencies = [
"half 2.2.1",
"num",
@@ -164,9 +164,9 @@ dependencies = [
[[package]]
name = "arrow-cast"
-version = "30.0.1"
+version = "31.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03976edbf66ac00a582af10a51743f0a9611777adfd68c71799d783344c3bdd2"
+checksum = "83dcdb1436cac574f1c1b30fda91c53c467534337bef4064bbd4ea2d6fbc6e04"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -180,9 +180,9 @@ dependencies = [
[[package]]
name = "arrow-csv"
-version = "30.0.1"
+version = "31.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c1b610dc9e3b43bcebeacede47381252ea41363fbcc3c3eb641ff24fc94e567e"
+checksum = "a01677ae9458f5af9e35e1aa6ba97502f539e621db0c6672566403f97edd0448"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -199,9 +199,9 @@ dependencies = [
[[package]]
name = "arrow-data"
-version = "30.0.1"
+version = "31.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "174df8602dedcdb9149538809c11bd3c0888af30b915f763c66a3d724391c8b9"
+checksum = "14e3e69c9fd98357eeeab4aa0f626ecf7ecf663e68e8fc04eac87c424a414477"
dependencies = [
"arrow-buffer",
"arrow-schema",
@@ -211,20 +211,21 @@ dependencies = [
[[package]]
name = "arrow-flight"
-version = "30.0.1"
+version = "31.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf32bc58976e558f1ba3ff0aa7b07914d4b70850906fc55d5d16790a9ee79e3f"
+checksum = "fd3ce08d31a1a24497bcf144029f8475539984aa50e41585e01b2057cf3dbb21"
dependencies = [
"arrow-array",
"arrow-buffer",
+ "arrow-cast",
"arrow-ipc",
"arrow-schema",
- "base64 0.20.0",
+ "base64 0.21.0",
"bytes",
"futures",
"proc-macro2",
"prost 0.11.6",
- "prost-build 0.11.5",
+ "prost-build 0.11.6",
"prost-derive 0.11.6",
"tokio",
"tonic",
@@ -233,9 +234,9 @@ dependencies = [
[[package]]
name = "arrow-ipc"
-version = "30.0.1"
+version = "31.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2a316907980e70fbf87b006c52993a22d93e4a9bca4ec2ac42cfedb2fdc204ac"
+checksum = "64cac2706acbd796965b6eaf0da30204fe44aacf70273f8cb3c9b7d7f3d4c190"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -247,9 +248,9 @@ dependencies = [
[[package]]
name = "arrow-json"
-version = "30.0.1"
+version = "31.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2cc1a1b2e98be0d8d20f932f76a8d976b779d502c8f6b828becc835d6879e903"
+checksum = "7790e8b7df2d8ef5ac802377ac256cf2fb80cbf7d44b82d6464e20ace6232a5a"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -265,9 +266,9 @@ dependencies = [
[[package]]
name = "arrow-ord"
-version = "30.0.1"
+version = "31.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7db83c14ddddf81c1d10ce303670f70b7687c8f52de7425b09ae905e4357fda5"
+checksum = "c7ee6e1b761dfffaaf7b5bbe68c113a576a3a802146c5c0b9fcec781e30d80a3"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -279,9 +280,9 @@ dependencies = [
[[package]]
name = "arrow-row"
-version = "30.0.1"
+version = "31.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "db41abdf586f1dba8c2973711d5c69ffb9d63688ffa46354b8c85bf9347a921c"
+checksum = "6e65bfedf782fc92721e796fdd26ae7343c98ba9a9243d62def9e4e1c4c1cf0b"
dependencies = [
"ahash 0.8.2",
"arrow-array",
@@ -294,15 +295,15 @@ dependencies = [
[[package]]
name = "arrow-schema"
-version = "30.0.1"
+version = "31.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a99dcc494fe6224e5ece572c5935d5109120a71df06bd8e04c4e23ac14dd8fac"
+checksum = "73ca49d010b27e2d73f70c1d1f90c1b378550ed0f4ad379c4dea0c997d97d723"
[[package]]
name = "arrow-select"
-version = "30.0.1"
+version = "31.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4e3a2cde3ea85b28f64704045d7d54e0fcc4b17efffced574d2dd3320218298f"
+checksum = "976cbaeb1a85c09eea81f3f9c149c758630ff422ed0238624c5c3f4704b6a53c"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -313,9 +314,9 @@ dependencies = [
[[package]]
name = "arrow-string"
-version = "30.0.1"
+version = "31.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04cf8d0003ebe0aecc716e0ac8c858c570872a7485c7c6284975f31469703a0d"
+checksum = "3d4882762f8f48a9218946c016553d38b04b4fe8202038dad4141b3b887b7da8"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -1389,7 +1390,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "16.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=279440b2ab92d18675b8102e342d4d82182287dc#279440b2ab92d18675b8102e342d4d82182287dc"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=64fa312ecc5f32294e70fd7389e18cb41f25e732#64fa312ecc5f32294e70fd7389e18cb41f25e732"
dependencies = [
"ahash 0.8.2",
"arrow",
@@ -1435,7 +1436,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "16.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=279440b2ab92d18675b8102e342d4d82182287dc#279440b2ab92d18675b8102e342d4d82182287dc"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=64fa312ecc5f32294e70fd7389e18cb41f25e732#64fa312ecc5f32294e70fd7389e18cb41f25e732"
dependencies = [
"arrow",
"chrono",
@@ -1448,7 +1449,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "16.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=279440b2ab92d18675b8102e342d4d82182287dc#279440b2ab92d18675b8102e342d4d82182287dc"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=64fa312ecc5f32294e70fd7389e18cb41f25e732#64fa312ecc5f32294e70fd7389e18cb41f25e732"
dependencies = [
"ahash 0.8.2",
"arrow",
@@ -1460,7 +1461,7 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
version = "16.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=279440b2ab92d18675b8102e342d4d82182287dc#279440b2ab92d18675b8102e342d4d82182287dc"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=64fa312ecc5f32294e70fd7389e18cb41f25e732#64fa312ecc5f32294e70fd7389e18cb41f25e732"
dependencies = [
"arrow",
"async-trait",
@@ -1476,7 +1477,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "16.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=279440b2ab92d18675b8102e342d4d82182287dc#279440b2ab92d18675b8102e342d4d82182287dc"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=64fa312ecc5f32294e70fd7389e18cb41f25e732#64fa312ecc5f32294e70fd7389e18cb41f25e732"
dependencies = [
"ahash 0.8.2",
"arrow",
@@ -1506,7 +1507,7 @@ dependencies = [
[[package]]
name = "datafusion-proto"
version = "16.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=279440b2ab92d18675b8102e342d4d82182287dc#279440b2ab92d18675b8102e342d4d82182287dc"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=64fa312ecc5f32294e70fd7389e18cb41f25e732#64fa312ecc5f32294e70fd7389e18cb41f25e732"
dependencies = [
"arrow",
"chrono",
@@ -1517,13 +1518,13 @@ dependencies = [
"parking_lot 0.12.1",
"pbjson-build",
"prost 0.11.6",
- "prost-build 0.11.5",
+ "prost-build 0.11.6",
]
[[package]]
name = "datafusion-row"
version = "16.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=279440b2ab92d18675b8102e342d4d82182287dc#279440b2ab92d18675b8102e342d4d82182287dc"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=64fa312ecc5f32294e70fd7389e18cb41f25e732#64fa312ecc5f32294e70fd7389e18cb41f25e732"
dependencies = [
"arrow",
"datafusion-common",
@@ -1534,7 +1535,7 @@ dependencies = [
[[package]]
name = "datafusion-sql"
version = "16.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=279440b2ab92d18675b8102e342d4d82182287dc#279440b2ab92d18675b8102e342d4d82182287dc"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=64fa312ecc5f32294e70fd7389e18cb41f25e732#64fa312ecc5f32294e70fd7389e18cb41f25e732"
dependencies = [
"arrow-schema",
"datafusion-common",
@@ -1950,7 +1951,7 @@ dependencies = [
"pbjson-types",
"predicate",
"prost 0.11.6",
- "prost-build 0.11.5",
+ "prost-build 0.11.6",
"query_functions",
"serde",
"snafu",
@@ -2008,7 +2009,7 @@ dependencies = [
"hyper",
"pin-project",
"prost 0.11.6",
- "prost-build 0.11.5",
+ "prost-build 0.11.6",
"prost-types 0.11.6",
"tokio",
"tokio-stream",
@@ -2023,7 +2024,7 @@ name = "grpc-binary-logger-proto"
version = "0.1.0"
dependencies = [
"prost 0.11.6",
- "prost-build 0.11.5",
+ "prost-build 0.11.6",
"prost-types 0.11.6",
"tonic",
"tonic-build",
@@ -2035,7 +2036,7 @@ name = "grpc-binary-logger-test-proto"
version = "0.1.0"
dependencies = [
"prost 0.11.6",
- "prost-build 0.11.5",
+ "prost-build 0.11.6",
"prost-types 0.11.6",
"tonic",
"tonic-build",
@@ -3884,9 +3885,9 @@ dependencies = [
[[package]]
name = "parquet"
-version = "30.0.1"
+version = "31.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4bba2a7630d2946f9e2020225062ad5619d70320e06dae6ae1074febf4c4e932"
+checksum = "6b4ee1ffc0778395c9783a5c74f2cad2fb1a128ade95a965212d31b7b13e3d45"
dependencies = [
"ahash 0.8.2",
"arrow-array",
@@ -3896,7 +3897,7 @@ dependencies = [
"arrow-ipc",
"arrow-schema",
"arrow-select",
- "base64 0.20.0",
+ "base64 0.21.0",
"brotli",
"bytes",
"chrono",
@@ -4012,7 +4013,7 @@ dependencies = [
"pbjson",
"pbjson-build",
"prost 0.11.6",
- "prost-build 0.11.5",
+ "prost-build 0.11.6",
"serde",
]
@@ -4202,7 +4203,7 @@ dependencies = [
"once_cell",
"parking_lot 0.12.1",
"prost 0.11.6",
- "prost-build 0.11.5",
+ "prost-build 0.11.6",
"prost-derive 0.11.6",
"sha2",
"smallvec",
@@ -4238,9 +4239,9 @@ dependencies = [
[[package]]
name = "predicates"
-version = "2.1.1"
+version = "2.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a5aab5be6e4732b473071984b3164dbbfb7a3674d30ea5ff44410b6bcd960c3c"
+checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd"
dependencies = [
"difflib",
"float-cmp",
@@ -4402,9 +4403,9 @@ dependencies = [
[[package]]
name = "prost-build"
-version = "0.11.5"
+version = "0.11.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb5320c680de74ba083512704acb90fe00f28f79207286a848e730c45dd73ed6"
+checksum = "a3f8ad728fb08fe212df3c05169e940fbb6d9d16a877ddde14644a983ba2012e"
dependencies = [
"bytes",
"heck",
@@ -5986,7 +5987,7 @@ checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4"
dependencies = [
"prettyplease",
"proc-macro2",
- "prost-build 0.11.5",
+ "prost-build 0.11.6",
"quote",
"syn",
]
@@ -6642,6 +6643,7 @@ dependencies = [
"arrow-ord",
"arrow-string",
"base64 0.13.1",
+ "base64 0.21.0",
"bitflags",
"byteorder",
"bytes",
diff --git a/Cargo.toml b/Cargo.toml
index afdc566209..8e183f48c2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -116,12 +116,12 @@ edition = "2021"
license = "MIT OR Apache-2.0"
[workspace.dependencies]
-arrow = { version = "30.0.0" }
-arrow-flight = { version = "30.0.0" }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="279440b2ab92d18675b8102e342d4d82182287dc", default-features = false }
-datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="279440b2ab92d18675b8102e342d4d82182287dc" }
+arrow = { version = "31.0.0" }
+arrow-flight = { version = "31.0.0" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="64fa312ecc5f32294e70fd7389e18cb41f25e732", default-features = false }
+datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="64fa312ecc5f32294e70fd7389e18cb41f25e732" }
hashbrown = { version = "0.13.2" }
-parquet = { version = "30.0.0" }
+parquet = { version = "31.0.0" }
# This profile optimizes for runtime performance and small binary size at the expense of longer
# build times. It's most suitable for final release builds.
diff --git a/service_common/src/error.rs b/service_common/src/error.rs
index 30b1ca1bee..b2504efd82 100644
--- a/service_common/src/error.rs
+++ b/service_common/src/error.rs
@@ -63,6 +63,9 @@ pub fn datafusion_error_to_tonic_code(e: &DataFusionError) -> tonic::Code {
// errors (e.g. misconfiguration or bad path) which would be
// an internal error and thus we classify them as such.
| DataFusionError::External(_)
+ // Substrait errors come from internal code and are unused
+ // with DataFusion at the moment
+ | DataFusionError::Substrait(_)
| DataFusionError::Internal(_) => tonic::Code::Internal,
// explicitly don't have a catchall here so any
// newly added DataFusion error will raise a compiler error for us to address
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index 3f9731d22a..f3bfe979a0 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -17,18 +17,19 @@ license.workspace = true
### BEGIN HAKARI SECTION
[dependencies]
ahash = { version = "0.8", default-features = false, features = ["getrandom", "runtime-rng"] }
-arrow = { version = "30", features = ["arrow-csv", "arrow-ipc", "arrow-json", "comfy-table", "csv", "dyn_cmp_dict", "ipc", "json", "prettyprint"] }
-arrow-flight = { version = "30", features = ["flight-sql-experimental"] }
-arrow-ord = { version = "30", default-features = false, features = ["dyn_cmp_dict"] }
-arrow-string = { version = "30", default-features = false, features = ["dyn_cmp_dict"] }
-base64 = { version = "0.13", features = ["std"] }
+arrow = { version = "31", features = ["arrow-csv", "arrow-ipc", "arrow-json", "comfy-table", "csv", "dyn_cmp_dict", "ipc", "json", "prettyprint"] }
+arrow-flight = { version = "31", features = ["flight-sql-experimental"] }
+arrow-ord = { version = "31", default-features = false, features = ["dyn_cmp_dict"] }
+arrow-string = { version = "31", default-features = false, features = ["dyn_cmp_dict"] }
+base64-594e8ee84c453af0 = { package = "base64", version = "0.13", features = ["std"] }
+base64-647d43efb71741da = { package = "base64", version = "0.21", features = ["std"] }
bitflags = { version = "1" }
byteorder = { version = "1", features = ["std"] }
bytes = { version = "1", features = ["std"] }
chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "iana-time-zone", "serde", "std", "winapi"] }
crossbeam-utils = { version = "0.8", features = ["std"] }
crypto-common = { version = "0.1", default-features = false, features = ["std"] }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "279440b2ab92d18675b8102e342d4d82182287dc", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "64fa312ecc5f32294e70fd7389e18cb41f25e732", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] }
digest = { version = "0.10", features = ["alloc", "block-buffer", "core-api", "mac", "std", "subtle"] }
either = { version = "1", features = ["use_std"] }
fixedbitset = { version = "0.4", features = ["std"] }
@@ -56,9 +57,9 @@ num-traits = { version = "0.2", features = ["i128", "libm", "std"] }
object_store = { version = "0.5", default-features = false, features = ["aws", "azure", "base64", "cloud", "gcp", "quick-xml", "rand", "reqwest", "ring", "rustls-pemfile", "serde", "serde_json"] }
once_cell = { version = "1", features = ["alloc", "parking_lot", "parking_lot_core", "race", "std"] }
parking_lot = { version = "0.12", features = ["arc_lock"] }
-parquet = { version = "30", features = ["arrow", "arrow-array", "arrow-buffer", "arrow-cast", "arrow-data", "arrow-ipc", "arrow-schema", "arrow-select", "async", "base64", "brotli", "experimental", "flate2", "futures", "lz4", "snap", "tokio", "zstd"] }
+parquet = { version = "31", features = ["arrow", "arrow-array", "arrow-buffer", "arrow-cast", "arrow-data", "arrow-ipc", "arrow-schema", "arrow-select", "async", "base64", "brotli", "experimental", "flate2", "futures", "lz4", "snap", "tokio", "zstd"] }
phf_shared = { version = "0.11", features = ["std"] }
-predicates = { version = "2", features = ["diff", "difflib", "float-cmp", "normalize-line-endings", "regex"] }
+predicates = { version = "2", features = ["diff", "float-cmp", "normalize-line-endings", "regex"] }
prost = { version = "0.11", features = ["prost-derive", "std"] }
prost-types = { version = "0.11", features = ["std"] }
rand = { version = "0.8", features = ["alloc", "getrandom", "libc", "rand_chacha", "small_rng", "std", "std_rng"] }
@@ -93,7 +94,8 @@ zstd-safe = { version = "6", default-features = false, features = ["arrays", "le
zstd-sys = { version = "2", default-features = false, features = ["legacy", "std", "zdict_builder"] }
[build-dependencies]
-base64 = { version = "0.13", features = ["std"] }
+base64-594e8ee84c453af0 = { package = "base64", version = "0.13", features = ["std"] }
+base64-647d43efb71741da = { package = "base64", version = "0.21", features = ["std"] }
bitflags = { version = "1" }
byteorder = { version = "1", features = ["std"] }
bytes = { version = "1", features = ["std"] }
|
3425bc176e5fcf07bc90e9f8249118214a44b5e6
|
Fraser Savage
|
2023-04-13 16:33:41
|
Surface stats for new namespace schema in cache
|
The previous behaviour of the router's NamespaceCache was to provide
put semantics where the entire schema in the cache is replaced. With
the addition of the additive merging side-effect, the metrics decorator
could not compute the correct statistics. This calculates them during
the merge and surfaces the result to the caller.
| null |
refactor(router): Surface stats for new namespace schema in cache
The previous behaviour of the router's NamespaceCache was to provide
put semantics where the entire schema in the cache is replaced. With
the addition of the additive merging side-effect, the metrics decorator
could not compute the correct statistics. This calculates them during
the merge and surfaces the result to the caller.
|
diff --git a/router/src/namespace_cache.rs b/router/src/namespace_cache.rs
index 3a219ee182..dc9bceb09c 100644
--- a/router/src/namespace_cache.rs
+++ b/router/src/namespace_cache.rs
@@ -36,5 +36,26 @@ pub trait NamespaceCache: Debug + Send + Sync {
&self,
namespace: NamespaceName<'static>,
schema: impl Into<Arc<NamespaceSchema>>,
- ) -> Option<Arc<NamespaceSchema>>;
+ ) -> (Option<Arc<NamespaceSchema>>, NamespaceStats);
+}
+
+#[derive(Debug, PartialEq, Eq)]
+/// An encapsulation of statistics associated with a namespace schema.
+pub struct NamespaceStats {
+ /// Number of tables within the namespace
+ pub table_count: u64,
+ /// Total number of columns across all tables within the namespace
+ pub column_count: u64,
+}
+
+impl NamespaceStats {
+ /// Derives a set of [`NamespaceStats`] from the given schema.
+ pub fn new(ns: &NamespaceSchema) -> Self {
+ let table_count = ns.tables.len() as _;
+ let column_count = ns.tables.values().fold(0, |acc, t| acc + t.columns.len()) as _;
+ Self {
+ table_count,
+ column_count,
+ }
+ }
}
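A small worked example of the derivation above (a sketch; a plain map stands in for `NamespaceSchema::tables`): a namespace with two tables holding 3 and 5 columns gives `table_count = 2` and `column_count = 3 + 5 = 8`.

```rust
use std::collections::BTreeMap;

fn main() {
    // Stand-in for `ns.tables`: table name -> column names.
    let tables: BTreeMap<&str, Vec<&str>> = BTreeMap::from([
        ("cpu", vec!["host", "usage", "time"]),
        ("disk", vec!["host", "path", "free", "used", "time"]),
    ]);

    let table_count = tables.len() as u64;
    // Same fold shape as `NamespaceStats::new`.
    let column_count = tables
        .values()
        .fold(0_u64, |acc, cols| acc + cols.len() as u64);

    assert_eq!((table_count, column_count), (2, 8));
}
```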
diff --git a/router/src/namespace_cache/memory.rs b/router/src/namespace_cache/memory.rs
index 86a25b7925..8c2c64bfa5 100644
--- a/router/src/namespace_cache/memory.rs
+++ b/router/src/namespace_cache/memory.rs
@@ -6,7 +6,7 @@ use hashbrown::HashMap;
use parking_lot::RwLock;
use thiserror::Error;
-use super::NamespaceCache;
+use super::{NamespaceCache, NamespaceStats};
/// An error type indicating that `namespace` is not present in the cache.
#[derive(Debug, Error)]
@@ -43,33 +43,45 @@ impl NamespaceCache for Arc<MemoryNamespaceCache> {
&self,
namespace: NamespaceName<'static>,
schema: impl Into<Arc<NamespaceSchema>>,
- ) -> Option<Arc<NamespaceSchema>> {
+ ) -> (Option<Arc<NamespaceSchema>>, NamespaceStats) {
let mut guard = self.cache.write();
let new_ns = schema.into();
+ let new_stats = NamespaceStats::new(&new_ns);
+
match guard.get(&namespace) {
Some(old_ns) => {
// If the previous tenant has a different ID then take the new
// schema. The old may have been replaced.
if old_ns.id != new_ns.id {
- return guard.insert(namespace, new_ns);
+ return (guard.insert(namespace, new_ns), new_stats);
}
let mut new_ns = (*new_ns).clone();
+ // The column count can be computed as part of the merge process
+ // here to save on additional iteration.
+ let mut new_column_count: u64 = 0;
for (table_name, new_table) in &mut new_ns.tables {
+ new_column_count += new_table.columns.len() as u64;
let old_columns = match old_ns.tables.get(table_name) {
Some(v) => &v.columns,
None => continue,
};
+
for (column_name, column) in old_columns {
if !new_table.columns.contains_key(column_name) {
+ new_column_count += 1;
new_table.columns.insert(column_name.clone(), *column);
}
}
}
- guard.insert(namespace, Arc::new(new_ns))
+ let new_stats = NamespaceStats {
+ table_count: new_ns.tables.len() as _,
+ column_count: new_column_count,
+ };
+ (guard.insert(namespace, Arc::new(new_ns)), new_stats)
}
- None => guard.insert(namespace, new_ns),
+ None => (guard.insert(namespace, new_ns), new_stats),
}
}
}
@@ -106,7 +118,7 @@ mod tests {
max_tables: 24,
retention_period_ns: Some(876),
};
- assert!(cache.put_schema(ns.clone(), schema1.clone()).is_none());
+ assert_matches!(cache.put_schema(ns.clone(), schema1.clone()), (None, _));
assert_eq!(
*cache.get_schema(&ns).await.expect("lookup failure"),
schema1
@@ -122,11 +134,12 @@ mod tests {
retention_period_ns: Some(876),
};
- assert_eq!(
- *cache
- .put_schema(ns.clone(), schema2.clone())
- .expect("should have existing schema"),
- schema1
+ assert_matches!(
+ cache
+ .put_schema(ns.clone(), schema2.clone()),
+ (Some(prev), _) => {
+ assert_eq!(*prev, schema1);
+ }
);
assert_eq!(
*cache.get_schema(&ns).await.expect("lookup failure"),
@@ -198,8 +211,18 @@ mod tests {
let cache_clone = Arc::clone(&cache);
let ns_clone = ns.clone();
tokio::task::spawn(async move {
- cache_clone.put_schema(ns_clone.clone(), schema_update_1);
- cache_clone.put_schema(ns_clone.clone(), schema_update_2);
+ assert_matches!(cache_clone.put_schema(ns_clone.clone(), schema_update_1), (None, new_stats) => {
+ assert_eq!(new_stats, NamespaceStats{
+ table_count: 1,
+ column_count: 1,
+ });
+ });
+ assert_matches!(cache_clone.put_schema(ns_clone.clone(), schema_update_2), (Some(_), new_stats) => {
+ assert_eq!(new_stats, NamespaceStats{
+ table_count: 1,
+ column_count: 2,
+ });
+ });
})
.await
.unwrap();
diff --git a/router/src/namespace_cache/metrics.rs b/router/src/namespace_cache/metrics.rs
index 5df5d1752d..ba9cd1b5a9 100644
--- a/router/src/namespace_cache/metrics.rs
+++ b/router/src/namespace_cache/metrics.rs
@@ -7,7 +7,7 @@ use data_types::{NamespaceName, NamespaceSchema};
use iox_time::{SystemProvider, TimeProvider};
use metric::{DurationHistogram, Metric, U64Gauge};
-use super::NamespaceCache;
+use super::{NamespaceCache, NamespaceStats};
/// An [`InstrumentedCache`] decorates a [`NamespaceCache`] with cache read
/// hit/miss and cache put insert/update metrics.
@@ -101,14 +101,13 @@ where
&self,
namespace: NamespaceName<'static>,
schema: impl Into<Arc<NamespaceSchema>>,
- ) -> Option<Arc<NamespaceSchema>> {
+ ) -> (Option<Arc<NamespaceSchema>>, NamespaceStats) {
let schema = schema.into();
- let stats = NamespaceStats::new(&schema);
let t = self.time_provider.now();
- let res = self.inner.put_schema(namespace, schema);
+ let (previous, new_stats) = self.inner.put_schema(namespace, schema);
- match res {
+ match previous {
Some(v) => {
if let Some(delta) = self.time_provider.now().checked_duration_since(t) {
self.put_update.record(delta);
@@ -117,14 +116,15 @@ where
// Figure out the difference between the new namespace and the
// evicted old namespace
let old_stats = NamespaceStats::new(&v);
- let table_count_diff = stats.table_count as i64 - old_stats.table_count as i64;
- let column_count_diff = stats.column_count as i64 - old_stats.column_count as i64;
+ let table_count_diff = new_stats.table_count as i64 - old_stats.table_count as i64;
+ let column_count_diff =
+ new_stats.column_count as i64 - old_stats.column_count as i64;
// Adjust the metrics to reflect the change
self.table_count.delta(table_count_diff);
self.column_count.delta(column_count_diff);
- Some(v)
+ (Some(v), new_stats)
}
None => {
if let Some(delta) = self.time_provider.now().checked_duration_since(t) {
@@ -132,36 +132,20 @@ where
}
// Add the new namespace stats to the counts.
- self.table_count.inc(stats.table_count);
- self.column_count.inc(stats.column_count);
+ self.table_count.inc(new_stats.table_count);
+ self.column_count.inc(new_stats.column_count);
- None
+ (None, new_stats)
}
}
}
}
-#[derive(Debug)]
-struct NamespaceStats {
- table_count: u64,
- column_count: u64,
-}
-
-impl NamespaceStats {
- fn new(ns: &NamespaceSchema) -> Self {
- let table_count = ns.tables.len() as _;
- let column_count = ns.tables.values().fold(0, |acc, t| acc + t.columns.len()) as _;
- Self {
- table_count,
- column_count,
- }
- }
-}
-
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
+ use assert_matches::assert_matches;
use data_types::{
ColumnId, ColumnSchema, ColumnType, NamespaceId, QueryPoolId, TableId, TableSchema, TopicId,
};
@@ -240,7 +224,7 @@ mod tests {
// No tables
let schema = new_schema(&[]);
- assert!(cache.put_schema(ns.clone(), schema).is_none());
+ assert_matches!(cache.put_schema(ns.clone(), schema), (None, _));
assert_histogram_hit(
            &registry,
"namespace_cache_put_duration",
@@ -258,7 +242,7 @@ mod tests {
// Add a table with 1 column
let schema = new_schema(&[1]);
- assert!(cache.put_schema(ns.clone(), schema).is_some());
+ assert_matches!(cache.put_schema(ns.clone(), schema), (Some(_), _));
assert_histogram_hit(
            &registry,
"namespace_cache_put_duration",
@@ -276,7 +260,7 @@ mod tests {
// Increase the number of columns in this one table
let schema = new_schema(&[5]);
- assert!(cache.put_schema(ns.clone(), schema).is_some());
+ assert_matches!(cache.put_schema(ns.clone(), schema), (Some(_), _));
assert_histogram_hit(
            &registry,
"namespace_cache_put_duration",
@@ -292,27 +276,9 @@ mod tests {
assert_eq!(cache.table_count.observe(), Observation::U64Gauge(1));
assert_eq!(cache.column_count.observe(), Observation::U64Gauge(5));
- // Decrease the number of columns
- let schema = new_schema(&[2]);
- assert!(cache.put_schema(ns.clone(), schema).is_some());
- assert_histogram_hit(
-        &registry,
- "namespace_cache_put_duration",
- ("op", "insert"),
- 1,
- ); // Unchanged
- assert_histogram_hit(
-        &registry,
- "namespace_cache_put_duration",
- ("op", "update"),
- 3,
- );
- assert_eq!(cache.table_count.observe(), Observation::U64Gauge(1));
- assert_eq!(cache.column_count.observe(), Observation::U64Gauge(2));
-
// Add another table
- let schema = new_schema(&[2, 5]);
- assert!(cache.put_schema(ns.clone(), schema).is_some());
+ let schema = new_schema(&[5, 5]);
+ assert_matches!(cache.put_schema(ns.clone(), schema), (Some(_), _));
assert_histogram_hit(
            &registry,
"namespace_cache_put_duration",
@@ -323,14 +289,14 @@ mod tests {
            &registry,
"namespace_cache_put_duration",
("op", "update"),
- 4,
+ 3,
);
assert_eq!(cache.table_count.observe(), Observation::U64Gauge(2));
- assert_eq!(cache.column_count.observe(), Observation::U64Gauge(7));
+ assert_eq!(cache.column_count.observe(), Observation::U64Gauge(10));
- // Add another table and adjust the existing tables (one up, one down)
- let schema = new_schema(&[1, 10, 4]);
- assert!(cache.put_schema(ns.clone(), schema).is_some());
+ // Add another table and adjust an existing table (increased column count)
+ let schema = new_schema(&[5, 10, 4]);
+ assert_matches!(cache.put_schema(ns.clone(), schema), (Some(_), _));
assert_histogram_hit(
            &registry,
"namespace_cache_put_duration",
@@ -341,14 +307,14 @@ mod tests {
            &registry,
"namespace_cache_put_duration",
("op", "update"),
- 5,
+ 4,
);
assert_eq!(cache.table_count.observe(), Observation::U64Gauge(3));
- assert_eq!(cache.column_count.observe(), Observation::U64Gauge(15));
+ assert_eq!(cache.column_count.observe(), Observation::U64Gauge(19));
// Remove a table
- let schema = new_schema(&[1, 10]);
- assert!(cache.put_schema(ns, schema).is_some());
+ let schema = new_schema(&[5, 10]);
+ assert_matches!(cache.put_schema(ns, schema), (Some(_), _));
assert_histogram_hit(
            &registry,
"namespace_cache_put_duration",
@@ -359,15 +325,15 @@ mod tests {
            &registry,
"namespace_cache_put_duration",
("op", "update"),
- 6,
+ 5,
);
assert_eq!(cache.table_count.observe(), Observation::U64Gauge(2));
- assert_eq!(cache.column_count.observe(), Observation::U64Gauge(11));
+ assert_eq!(cache.column_count.observe(), Observation::U64Gauge(15));
// Add a new namespace
let ns = NamespaceName::new("another").expect("namespace name is valid");
let schema = new_schema(&[10, 12, 9]);
- assert!(cache.put_schema(ns.clone(), schema).is_none());
+ assert_matches!(cache.put_schema(ns.clone(), schema), (None, _));
assert_histogram_hit(
            &registry,
"namespace_cache_put_duration",
@@ -378,10 +344,10 @@ mod tests {
            &registry,
"namespace_cache_put_duration",
("op", "update"),
- 6,
+ 5,
);
assert_eq!(cache.table_count.observe(), Observation::U64Gauge(5));
- assert_eq!(cache.column_count.observe(), Observation::U64Gauge(42));
+ assert_eq!(cache.column_count.observe(), Observation::U64Gauge(46)); // 15 + new columns (31)
let _got = cache.get_schema(&ns).await.expect("should exist");
assert_histogram_hit(
diff --git a/router/src/namespace_cache/read_through_cache.rs b/router/src/namespace_cache/read_through_cache.rs
index 697ab93690..b0fd56b06c 100644
--- a/router/src/namespace_cache/read_through_cache.rs
+++ b/router/src/namespace_cache/read_through_cache.rs
@@ -8,7 +8,7 @@ use iox_catalog::interface::{get_schema_by_name, Catalog, SoftDeletedRows};
use observability_deps::tracing::*;
use super::memory::CacheMissErr;
-use super::NamespaceCache;
+use super::{NamespaceCache, NamespaceStats};
/// A [`ReadThroughCache`] decorates a [`NamespaceCache`] with read-through
/// caching behaviour on calls to `self.get_schema()` when contained in an
@@ -90,7 +90,7 @@ where
&self,
namespace: NamespaceName<'static>,
schema: impl Into<Arc<NamespaceSchema>>,
- ) -> Option<Arc<NamespaceSchema>> {
+ ) -> (Option<Arc<NamespaceSchema>>, NamespaceStats) {
self.inner_cache.put_schema(namespace, schema)
}
}
@@ -126,7 +126,7 @@ mod tests {
iox_catalog::DEFAULT_MAX_TABLES,
iox_catalog::DEFAULT_RETENTION_PERIOD,
);
- assert_matches!(cache.put_schema(ns.clone(), schema1.clone()), None);
+ assert_matches!(cache.put_schema(ns.clone(), schema1.clone()), (None, _));
// Ensure it is present
assert_eq!(
diff --git a/router/src/namespace_cache/sharded_cache.rs b/router/src/namespace_cache/sharded_cache.rs
index 288f9a45b6..0a01ace184 100644
--- a/router/src/namespace_cache/sharded_cache.rs
+++ b/router/src/namespace_cache/sharded_cache.rs
@@ -4,7 +4,7 @@ use async_trait::async_trait;
use data_types::{NamespaceName, NamespaceSchema};
use sharder::JumpHash;
-use super::NamespaceCache;
+use super::{NamespaceCache, NamespaceStats};
/// A decorator sharding the [`NamespaceCache`] keyspace into a set of `T`.
#[derive(Debug)]
@@ -40,7 +40,7 @@ where
&self,
namespace: NamespaceName<'static>,
schema: impl Into<Arc<NamespaceSchema>>,
- ) -> Option<Arc<NamespaceSchema>> {
+ ) -> (Option<Arc<NamespaceSchema>>, NamespaceStats) {
self.shards.hash(&namespace).put_schema(namespace, schema)
}
}
@@ -107,7 +107,7 @@ mod tests {
// Populate the cache
for (name, id) in &names {
let schema = schema_with_id(*id as _);
- assert!(cache.put_schema(name.clone(), schema).is_none());
+ assert_matches!(cache.put_schema(name.clone(), schema), (None, _));
}
// The mapping should be stable
|
ae3f73f65e8cf604597c4d3aa485035b441b34f4
|
Dom Dwyer
|
2023-08-23 15:13:13
|
optional ParquetFile::to_delete
|
This field is nullable, so let's model it as nullable.
| null |
refactor(proto): optional ParquetFile::to_delete
This field is nullable, so let's model it as nullable.
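A minimal sketch of the sentinel-versus-Option change (the struct and function names here are illustrative, not taken from the crate): proto3 `optional` maps to `Option<i64>` in the generated Rust type, so "never deleted" no longer has to share the encoding 0 with "deleted at t=0".

// Illustrative only: the catalog-to-proto conversion keeps the None case distinct.
struct CatalogParquetFile {
    to_delete: Option<i64>,
}

fn proto_to_delete(f: &CatalogParquetFile) -> Option<i64> {
    // Before this change the equivalent conversion was f.to_delete.unwrap_or(0).
    f.to_delete
}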
|
diff --git a/generated_types/protos/influxdata/iox/catalog/v1/parquet_file.proto b/generated_types/protos/influxdata/iox/catalog/v1/parquet_file.proto
index e61c4430a8..bc6c135cfe 100644
--- a/generated_types/protos/influxdata/iox/catalog/v1/parquet_file.proto
+++ b/generated_types/protos/influxdata/iox/catalog/v1/parquet_file.proto
@@ -32,7 +32,7 @@ message ParquetFile {
// the max timestamp of data in this file
int64 max_time = 10;
// the optional timestamp of when this file was marked for deletion
- int64 to_delete = 11;
+ optional int64 to_delete = 11;
// the file size in bytes
int64 file_size_bytes = 12;
// the number of rows in this file
diff --git a/gossip_parquet_file/src/lib.rs b/gossip_parquet_file/src/lib.rs
index 05557cc238..4f21de0149 100644
--- a/gossip_parquet_file/src/lib.rs
+++ b/gossip_parquet_file/src/lib.rs
@@ -199,7 +199,7 @@ mod tests {
object_store_id: "bananas".to_string(),
min_time: 1,
max_time: 100,
- to_delete: 0,
+ to_delete: Some(0),
file_size_bytes: 424242,
row_count: 4242111,
compaction_level: 4200,
diff --git a/service_grpc_catalog/src/lib.rs b/service_grpc_catalog/src/lib.rs
index 5353503417..3d7592639a 100644
--- a/service_grpc_catalog/src/lib.rs
+++ b/service_grpc_catalog/src/lib.rs
@@ -218,7 +218,7 @@ fn to_parquet_file(p: data_types::ParquetFile) -> ParquetFile {
object_store_id: p.object_store_id.to_string(),
min_time: p.min_time.get(),
max_time: p.max_time.get(),
- to_delete: p.to_delete.map(|t| t.get()).unwrap_or(0),
+ to_delete: p.to_delete.map(|t| t.get()),
file_size_bytes: p.file_size_bytes,
row_count: p.row_count,
compaction_level: p.compaction_level as i32,
|
47214ec9a07a6ec5f81cea3e8e5a83bd1d863215
|
Dom Dwyer
|
2023-05-30 11:06:38
|
prevent panics in partitioning logic
|
Changes the partitioning logic to be fallible. This prevents an invalid
partition template from causing a panic, previously possible through two
known code paths:
* TagValue formatter referencing a non-tag column
* Time formatter using an invalid strftime format string
If either occurs, the write attempt is now aborted and an error returned
to the user with an HTTP 500 status code.
Additionally, unexpected partitioner errors now map to a catch-all error
instead of panicking.
| null |
fix: prevent panics in partitioning logic
Changes the partitioning logic to be fallible. This prevents an invalid
partition template from causing a panic, previously possible through two
known code paths:
* TagValue formatter referencing a non-tag column
* Time formatter using an invalid strftime format string
If either occurs, the write attempt is now aborted and an error returned
to the user with an HTTP 500 status code.
Additionally, unexpected partitioner errors now map to a catch-all error
instead of panicking.
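A minimal sketch of the strftime failure mode, using only chrono's public API (the function name is illustrative, not the router's code): the delayed formatter does not validate the format string up front, so an invalid specifier such as "%3F" only fails when the value is rendered; calling to_string() on it panics, while writing it with write! surfaces a recoverable std::fmt::Error.

use chrono::{TimeZone, Utc};
use std::fmt::Write;

// Illustrative only: capture the formatting error instead of panicking.
fn render_time_part(ts_nanos: i64, strftime_fmt: &str) -> Result<String, String> {
    let mut out = String::new();
    write!(out, "{}", Utc.timestamp_nanos(ts_nanos).format(strftime_fmt))
        .map_err(|_| format!("invalid strftime format: {strftime_fmt:?}"))?;
    Ok(out)
}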
|
diff --git a/Cargo.lock b/Cargo.lock
index d0e482eafd..818bc33460 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3572,6 +3572,7 @@ version = "0.1.0"
dependencies = [
"arrow",
"arrow_util",
+ "assert_matches",
"chrono",
"data_types",
"hashbrown 0.13.2",
@@ -3584,6 +3585,7 @@ dependencies = [
"rand 0.8.5",
"schema",
"snafu",
+ "thiserror",
"workspace-hack",
]
diff --git a/mutable_batch/Cargo.toml b/mutable_batch/Cargo.toml
index 73311c1948..51e644b7a0 100644
--- a/mutable_batch/Cargo.toml
+++ b/mutable_batch/Cargo.toml
@@ -18,8 +18,10 @@ hashbrown = { workspace = true }
itertools = "0.10"
workspace-hack = { version = "0.1", path = "../workspace-hack" }
percent-encoding = "2.2.0"
+thiserror = "1.0.40"
[dev-dependencies]
+assert_matches = "1.5.0"
mutable_batch_lp = { path = "../mutable_batch_lp" }
paste = "1.0.12"
proptest = "1.2.0"
diff --git a/mutable_batch/src/payload.rs b/mutable_batch/src/payload.rs
index 74ffafd9b9..d22ce0a4e5 100644
--- a/mutable_batch/src/payload.rs
+++ b/mutable_batch/src/payload.rs
@@ -6,6 +6,8 @@ use hashbrown::HashMap;
use schema::TIME_COLUMN_NAME;
use std::{num::NonZeroUsize, ops::Range};
+pub use self::partition::PartitionKeyError;
+
mod filter;
mod partition;
@@ -101,7 +103,7 @@ impl<'a> PartitionWrite<'a> {
pub fn partition(
batch: &'a MutableBatch,
partition_template: &TablePartitionTemplateOverride,
- ) -> HashMap<PartitionKey, Self> {
+ ) -> Result<HashMap<PartitionKey, Self>, PartitionKeyError> {
use hashbrown::hash_map::Entry;
let time = get_time_column(batch);
@@ -110,7 +112,7 @@ impl<'a> PartitionWrite<'a> {
let row_count = NonZeroUsize::new(range.end - range.start).unwrap();
let (min_timestamp, max_timestamp) = min_max_time(&time[range.clone()]);
- match partition_ranges.entry(PartitionKey::from(partition)) {
+ match partition_ranges.entry(PartitionKey::from(partition?)) {
Entry::Vacant(v) => {
v.insert(PartitionWrite {
batch,
@@ -129,7 +131,7 @@ impl<'a> PartitionWrite<'a> {
}
}
}
- partition_ranges
+ Ok(partition_ranges)
}
}
diff --git a/mutable_batch/src/payload/partition.rs b/mutable_batch/src/payload/partition.rs
index c88951bdab..34edb77c5a 100644
--- a/mutable_batch/src/payload/partition.rs
+++ b/mutable_batch/src/payload/partition.rs
@@ -5,25 +5,46 @@
//!
//! The partitioning template, derived partition key format, and encodings are
//! described in detail in the [`data_types::partition_template`] module.
+use std::{borrow::Cow, fmt::Write, ops::Range};
-use crate::{
- column::{Column, ColumnData},
- MutableBatch,
-};
use chrono::{format::StrftimeItems, TimeZone, Utc};
use data_types::partition_template::{
TablePartitionTemplateOverride, TemplatePart, ENCODED_PARTITION_KEY_CHARS,
PARTITION_KEY_DELIMITER, PARTITION_KEY_VALUE_EMPTY_STR, PARTITION_KEY_VALUE_NULL_STR,
};
use percent_encoding::utf8_percent_encode;
-use schema::TIME_COLUMN_NAME;
-use std::{borrow::Cow, ops::Range};
+use schema::{InfluxColumnType, TIME_COLUMN_NAME};
+use thiserror::Error;
+
+use crate::{
+ column::{Column, ColumnData},
+ MutableBatch,
+};
+
+/// An error generating a partition key for a row.
+#[derive(Debug, Error, PartialEq, Eq)]
+pub enum PartitionKeyError {
+ /// The partition template defines a [`Template::TimeFormat`] part, but the
+ /// provided strftime formatter is invalid.
+ #[error("invalid strftime format in partition template: {0}")]
+ InvalidStrftime(String),
+
+ /// The partition template defines a [`Template::TagValue`] part, but the
+ /// column type is not "tag".
+ #[error("tag value partitioner does not accept input columns of type {0:?}")]
+ TagValueNotTag(InfluxColumnType),
+
+ /// A "catch all" error for when a formatter returns [`std::fmt::Error`],
+ /// which contains no context.
+ #[error("partition key generation error")]
+ FmtError(#[from] std::fmt::Error),
+}
/// Returns an iterator identifying consecutive ranges for a given partition key
pub fn partition_batch<'a>(
batch: &'a MutableBatch,
template: &'a TablePartitionTemplateOverride,
-) -> impl Iterator<Item = (String, Range<usize>)> + 'a {
+) -> impl Iterator<Item = (Result<String, PartitionKeyError>, Range<usize>)> + 'a {
range_encode(partition_keys(batch, template.parts()))
}
@@ -48,7 +69,11 @@ enum Template<'a> {
impl<'a> Template<'a> {
/// Renders this template to `out` for the row `idx`.
- fn fmt_row<W: std::fmt::Write>(&self, out: &mut W, idx: usize) -> std::fmt::Result {
+ fn fmt_row<W: std::fmt::Write>(
+ &self,
+ out: &mut W,
+ idx: usize,
+ ) -> Result<(), PartitionKeyError> {
match self {
Template::TagValue(col) if col.valid.get(idx) => match &col.data {
ColumnData::Tag(col_data, dictionary, _) => out.write_str(never_empty(
@@ -57,30 +82,35 @@ impl<'a> Template<'a> {
&ENCODED_PARTITION_KEY_CHARS,
))
.as_ref(),
- )),
- other => panic!(
- "partitioning only works on tag columns, but column was type `{other:?}`"
- ),
+ ))?,
+ _ => return Err(PartitionKeyError::TagValueNotTag(col.influx_type())),
},
Template::TimeFormat(t, format) => {
- let formatted = Utc
- .timestamp_nanos(t[idx])
- .format_with_items(format.clone()) // Cheap clone of refs
- .to_string();
+ let mut s = String::new();
+ write!(
+ s,
+ "{}",
+ Utc.timestamp_nanos(t[idx])
+ .format_with_items(format.clone()) // Cheap clone of refs
+ )
+ .map_err(|_| PartitionKeyError::InvalidStrftime(format!("{format:?}")))?;
+
out.write_str(
Cow::from(utf8_percent_encode(
- formatted.as_str(),
+ s.as_str(),
&ENCODED_PARTITION_KEY_CHARS,
))
.as_ref(),
- )
+ )?
}
// Either a tag that has no value for this given row index, or the
// batch does not contain this tag at all.
Template::TagValue(_) | Template::MissingTag => {
- out.write_str(PARTITION_KEY_VALUE_NULL_STR)
+ out.write_str(PARTITION_KEY_VALUE_NULL_STR)?
}
}
+
+ Ok(())
}
}
@@ -97,7 +127,7 @@ fn never_empty(s: &str) -> &str {
fn partition_keys<'a>(
batch: &'a MutableBatch,
template_parts: impl Iterator<Item = TemplatePart<'a>>,
-) -> impl Iterator<Item = String> + 'a {
+) -> impl Iterator<Item = Result<String, PartitionKeyError>> + 'a {
// Extract the timestamp data.
let time = match batch.column(TIME_COLUMN_NAME).map(|v| &v.data) {
Ok(ColumnData::I64(data, _)) => data.as_slice(),
@@ -121,8 +151,7 @@ fn partition_keys<'a>(
// Evaluate each template part for this row
for (col_idx, col) in template.iter().enumerate() {
- col.fmt_row(&mut string, idx)
- .expect("string writing is infallible");
+ col.fmt_row(&mut string, idx)?;
// If this isn't the last element in the template, insert a field
// delimiter.
@@ -131,7 +160,7 @@ fn partition_keys<'a>(
}
}
- string
+ Ok(string)
})
}
@@ -175,6 +204,8 @@ mod tests {
use super::*;
use crate::writer::Writer;
+
+ use assert_matches::assert_matches;
use data_types::partition_template::{build_column_values, test_table_partition_override};
use proptest::{prelude::*, prop_compose, proptest, strategy::Strategy};
use rand::prelude::*;
@@ -199,7 +230,9 @@ mod tests {
writer.commit();
let template_parts = TablePartitionTemplateOverride::new(None, &Default::default());
- let keys: Vec<_> = partition_keys(&batch, template_parts.parts()).collect();
+ let keys: Vec<_> = partition_keys(&batch, template_parts.parts())
+ .collect::<Result<Vec<_>, _>>()
+ .unwrap();
assert_eq!(keys, vec!["1970-01-01".to_string()])
}
@@ -259,7 +292,9 @@ mod tests {
writer.commit();
- let keys: Vec<_> = partition_keys(&batch, template_parts.into_iter()).collect();
+ let keys: Vec<_> = partition_keys(&batch, template_parts.into_iter())
+ .collect::<Result<Vec<_>, _>>()
+ .unwrap();
assert_eq!(
keys,
@@ -274,7 +309,6 @@ mod tests {
}
#[test]
- #[should_panic(expected = "partitioning only works on tag columns, but column was type")]
fn partitioning_on_fields_panics() {
let mut batch = MutableBatch::new();
let mut writer = Writer::new(&mut batch, 5);
@@ -295,7 +329,8 @@ mod tests {
writer.commit();
- let _keys: Vec<_> = partition_keys(&batch, template_parts.into_iter()).collect();
+ let got: Result<Vec<_>, _> = partition_keys(&batch, template_parts.into_iter()).collect();
+ assert_matches::assert_matches!(got, Err(PartitionKeyError::TagValueNotTag(_)));
}
// Generate a test that asserts the derived partition key matches
@@ -333,7 +368,7 @@ mod tests {
writer.commit();
- let keys: Vec<_> = partition_keys(&batch, template.parts()).collect();
+ let keys: Vec<_> = partition_keys(&batch, template.parts()).collect::<Result<Vec<_>, _>>().unwrap();
assert_eq!(keys, vec![$want_key.to_string()]);
// Reverse the encoding.
@@ -420,6 +455,28 @@ mod tests {
want_reversed_tags = [("a", "|"), ("b", "!"), ("d", "%7C%21%257C"), ("e", "^")]
);
+ /// A test using an invalid strftime format string.
+ #[test]
+ fn test_invalid_strftime() {
+ let mut batch = MutableBatch::new();
+ let mut writer = Writer::new(&mut batch, 1);
+
+ writer.write_time("time", vec![1].into_iter()).unwrap();
+ writer
+ .write_tag("region", Some(&[0b00000001]), vec!["bananas"].into_iter())
+ .unwrap();
+ writer.commit();
+
+ let template = [TemplatePart::TimeFormat("%3F")]
+ .into_iter()
+ .collect::<Vec<_>>();
+ let template = test_table_partition_override(template);
+
+ let ret = partition_keys(&batch, template.parts()).collect::<Result<Vec<_>, _>>();
+
+ assert_matches!(ret, Err(PartitionKeyError::InvalidStrftime(_)));
+ }
+
// These values are arbitrarily chosen when building an input to the
// partitioner.
@@ -430,6 +487,7 @@ mod tests {
// Arbitrary template parts are selected from this set.
const TEST_TEMPLATE_PARTS: &[TemplatePart<'static>] = &[
TemplatePart::TimeFormat("%Y|%m|%d!-string"),
+ TemplatePart::TimeFormat("%Y|%m|%d!-%%bananas"),
TemplatePart::TimeFormat("%Y/%m/%d"),
TemplatePart::TimeFormat("%Y-%m-%d"),
TemplatePart::TagValue(""),
@@ -440,6 +498,7 @@ mod tests {
TemplatePart::TagValue("%tags!"),
TemplatePart::TagValue("my_tag"),
TemplatePart::TagValue("my|tag"),
+ TemplatePart::TagValue("%%%%|!!!!|"),
];
prop_compose! {
@@ -493,7 +552,9 @@ mod tests {
}
writer.commit();
- let keys: Vec<_> = partition_keys(&batch, template.parts()).collect();
+ let keys: Vec<_> = partition_keys(&batch, template.parts())
+ .collect::<Result<Vec<_>, _>>()
+ .unwrap();
assert_eq!(keys.len(), 1);
// Reverse the encoding.
@@ -512,5 +573,40 @@ mod tests {
assert_eq!(reversed, want_reversed);
}
+
+ /// A property test that asserts the partitioner tolerates (does not
+    /// panic) randomised, potentially invalid strftime formatter strings.
+ #[test]
+ fn prop_arbitrary_strftime_format(fmt in any::<String>()) {
+ let mut batch = MutableBatch::new();
+ let mut writer = Writer::new(&mut batch, 1);
+
+ // Generate a single time-based partitioning template with a
+ // randomised format string.
+ let template = vec![
+ TemplatePart::TimeFormat(&fmt),
+ ];
+ let template = test_table_partition_override(template);
+
+ // Timestamp: 2023-05-29T13:03:16Z
+ writer
+ .write_time("time", vec![1685365396931384064].into_iter())
+ .unwrap();
+
+ writer
+ .write_tag("bananas", Some(&[0b00000001]), vec!["great"].into_iter())
+ .unwrap();
+
+ writer.commit();
+ let ret = partition_keys(&batch, template.parts()).collect::<Result<Vec<_>, _>>();
+
+            // The call is allowed to succeed or fail under this test (but not
+ // panic), and the returned error/value must match certain
+ // properties:
+ match ret {
+ Ok(v) => { assert_eq!(v.len(), 1); },
+ Err(e) => { assert_matches!(e, PartitionKeyError::InvalidStrftime(_)); },
+ }
+ }
}
}
diff --git a/mutable_batch/tests/writer_fuzz.rs b/mutable_batch/tests/writer_fuzz.rs
index c4e69e91ff..0025f042f2 100644
--- a/mutable_batch/tests/writer_fuzz.rs
+++ b/mutable_batch/tests/writer_fuzz.rs
@@ -438,7 +438,7 @@ fn test_partition_write() {
let table_partition_template =
test_table_partition_override(vec![TemplatePart::TagValue("t1")]);
- let partitioned = PartitionWrite::partition(&batch, &table_partition_template);
+ let partitioned = PartitionWrite::partition(&batch, &table_partition_template).unwrap();
for (_, write) in &partitioned {
verify_write(write);
diff --git a/mutable_batch_pb/tests/encode.rs b/mutable_batch_pb/tests/encode.rs
index e3ac70a05b..f43c2208e9 100644
--- a/mutable_batch_pb/tests/encode.rs
+++ b/mutable_batch_pb/tests/encode.rs
@@ -120,7 +120,7 @@ fn test_encode_decode_null_columns_issue_4272() {
.unwrap();
writer.commit();
- let mut partitions = PartitionWrite::partition(&batch, &Default::default());
+ let mut partitions = PartitionWrite::partition(&batch, &Default::default()).unwrap();
// There should be two partitions, one with for the timestamp 160, and
// one for the other timestamp.
diff --git a/router/src/dml_handlers/partitioner.rs b/router/src/dml_handlers/partitioner.rs
index 21fdab675b..b65c045ced 100644
--- a/router/src/dml_handlers/partitioner.rs
+++ b/router/src/dml_handlers/partitioner.rs
@@ -4,7 +4,7 @@ use data_types::{
PartitionKey, TableId,
};
use hashbrown::HashMap;
-use mutable_batch::{MutableBatch, PartitionWrite, WritePayload};
+use mutable_batch::{MutableBatch, PartitionKeyError, PartitionWrite, WritePayload};
use observability_deps::tracing::*;
use std::sync::Arc;
use thiserror::Error;
@@ -18,6 +18,10 @@ pub enum PartitionError {
/// Failed to write to the partitioned table batch.
#[error("error batching into partitioned write: {0}")]
BatchWrite(#[from] mutable_batch::Error),
+
+ /// An error deriving the partition key from the partition key template.
+ #[error("error generating partition key: {0}")]
+ Partitioner(#[from] PartitionKeyError),
}
/// A decorator of `T`, tagging it with the partition key derived from it.
@@ -76,7 +80,7 @@ impl DmlHandler for Partitioner {
// Partition the table batch according to the configured partition
// template and write it into the partition-keyed map.
for (partition_key, partition_payload) in
- PartitionWrite::partition(&batch, &table_partition_template)
+ PartitionWrite::partition(&batch, &table_partition_template)?
{
let partition = partitions.entry(partition_key).or_default();
let table_batch = partition
diff --git a/router/src/server/http.rs b/router/src/server/http.rs
index 1920611b8d..32baf0a5d7 100644
--- a/router/src/server/http.rs
+++ b/router/src/server/http.rs
@@ -149,6 +149,9 @@ impl From<&DmlError> for StatusCode {
DmlError::Internal(_) => StatusCode::INTERNAL_SERVER_ERROR,
DmlError::Partition(PartitionError::BatchWrite(_)) => StatusCode::INTERNAL_SERVER_ERROR,
+ DmlError::Partition(PartitionError::Partitioner(_)) => {
+ StatusCode::INTERNAL_SERVER_ERROR
+ }
DmlError::Retention(RetentionError::OutsideRetention { .. }) => StatusCode::FORBIDDEN,
DmlError::RpcWrite(RpcWriteError::Client(RpcWriteClientError::Upstream(_))) => {
StatusCode::INTERNAL_SERVER_ERROR
|
ac26ceef91ac23870e4165583793fbd3819bf010
|
Carol (Nichols || Goulding)
|
2023-06-05 14:01:28
|
Make a place to do partition template validation
|
- Create data_types::partition_template::ValidationError
- Make creation of NamespacePartitionTemplateOverride and
TablePartitionTemplateOverride fallible
- Move SerializationWrapper into a module to make its inner field
private to force creation through one fallible constructor; this is
where the validation logic will go to be shared among all uses of
partition templates
| null |
feat: Make a place to do partition template validation
- Create data_types::partition_template::ValidationError
- Make creation of NamespacePartitionTemplateOverride and
TablePartitionTemplateOverride fallible
- Move SerializationWrapper into a module to make its inner field
private to force creation through one fallible constructor; this is
where the validation logic will go to be shared among all uses of
partition templates
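A hedged sketch of the encapsulation pattern described above (all names and the validation rule are placeholders): because the wrapper's field is private to its module, the only way to construct one is the fallible TryFrom, so every code path is forced through validation.

mod serialization_sketch {
    #[derive(Debug)]
    pub struct ValidationError;

    // Inner field is private to this module; no public constructor bypasses checks.
    #[derive(Debug)]
    pub struct Wrapper(String);

    impl TryFrom<String> for Wrapper {
        type Error = ValidationError;

        fn try_from(raw: String) -> Result<Self, Self::Error> {
            if raw.is_empty() {
                return Err(ValidationError); // placeholder for real template validation
            }
            Ok(Self(raw))
        }
    }

    impl Wrapper {
        pub fn inner(&self) -> &str {
            &self.0
        }
    }
}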
|
diff --git a/data_types/src/partition_template.rs b/data_types/src/partition_template.rs
index c29d28d4c1..3b879a9dd8 100644
--- a/data_types/src/partition_template.rs
+++ b/data_types/src/partition_template.rs
@@ -92,6 +92,15 @@ use generated_types::influxdata::iox::partition_template::v1 as proto;
use once_cell::sync::Lazy;
use percent_encoding::{percent_decode_str, AsciiSet, CONTROLS};
use std::{borrow::Cow, sync::Arc};
+use thiserror::Error;
+
+/// TODO: Actually validate
+#[derive(Debug, Error)]
+#[allow(missing_copy_implementations, missing_docs)]
+pub enum ValidationError {
+ #[error("Custom partition template must have at least one part")]
+ NoParts,
+}
/// The sentinel character used to delimit partition key parts in the partition
/// key string.
@@ -145,33 +154,43 @@ pub static PARTITION_BY_DAY_PROTO: Lazy<Arc<proto::PartitionTemplate>> = Lazy::n
/// A partition template specified by a namespace record.
#[derive(Debug, PartialEq, Clone, Default, sqlx::Type)]
#[sqlx(transparent)]
-pub struct NamespacePartitionTemplateOverride(Option<SerializationWrapper>);
+pub struct NamespacePartitionTemplateOverride(Option<serialization::Wrapper>);
+
+impl TryFrom<proto::PartitionTemplate> for NamespacePartitionTemplateOverride {
+ type Error = ValidationError;
-impl From<proto::PartitionTemplate> for NamespacePartitionTemplateOverride {
- fn from(partition_template: proto::PartitionTemplate) -> Self {
- Self(Some(SerializationWrapper(Arc::new(partition_template))))
+ fn try_from(partition_template: proto::PartitionTemplate) -> Result<Self, Self::Error> {
+ Ok(Self(Some(serialization::Wrapper::try_from(
+ partition_template,
+ )?)))
}
}
/// A partition template specified by a table record.
#[derive(Debug, PartialEq, Clone, Default, sqlx::Type)]
#[sqlx(transparent)]
-pub struct TablePartitionTemplateOverride(Option<SerializationWrapper>);
+pub struct TablePartitionTemplateOverride(Option<serialization::Wrapper>);
impl TablePartitionTemplateOverride {
/// When a table is being explicitly created, the creation request might have contained a
/// custom partition template for that table. If the custom partition template is present, use
/// it. Otherwise, use the namespace's partition template.
- pub fn new(
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error if the custom partition template specified is invalid.
+ pub fn try_new(
custom_table_template: Option<proto::PartitionTemplate>,
namespace_template: &NamespacePartitionTemplateOverride,
- ) -> Self {
+ ) -> Result<Self, ValidationError> {
match (custom_table_template, namespace_template.0.as_ref()) {
- (Some(table_proto), _) => Self(Some(SerializationWrapper(Arc::new(table_proto)))),
- (None, Some(namespace_serialization_wrapper)) => Self(Some(SerializationWrapper(
- Arc::clone(&namespace_serialization_wrapper.0),
- ))),
- (None, None) => Self(None),
+ (Some(table_proto), _) => {
+ Ok(Self(Some(serialization::Wrapper::try_from(table_proto)?)))
+ }
+ (None, Some(namespace_serialization_wrapper)) => {
+ Ok(Self(Some(namespace_serialization_wrapper.clone())))
+ }
+ (None, None) => Ok(Self(None)),
}
}
@@ -181,7 +200,7 @@ impl TablePartitionTemplateOverride {
pub fn parts(&self) -> impl Iterator<Item = TemplatePart<'_>> {
self.0
.as_ref()
- .map(|serialization_wrapper| &serialization_wrapper.0)
+ .map(|serialization_wrapper| serialization_wrapper.inner())
.unwrap_or_else(|| &PARTITION_BY_DAY_PROTO)
.parts
.iter()
@@ -197,48 +216,70 @@ impl TablePartitionTemplateOverride {
/// from the database through `sqlx` for the `NamespacePartitionTemplateOverride` and
/// `TablePartitionTemplateOverride` types. It's an internal implementation detail to minimize code
/// duplication.
-#[derive(Debug, Clone, PartialEq)]
-struct SerializationWrapper(Arc<proto::PartitionTemplate>);
-
-impl<DB> sqlx::Type<DB> for SerializationWrapper
-where
- sqlx::types::Json<Self>: sqlx::Type<DB>,
- DB: sqlx::Database,
-{
- fn type_info() -> DB::TypeInfo {
- <sqlx::types::Json<Self> as sqlx::Type<DB>>::type_info()
+mod serialization {
+ use generated_types::influxdata::iox::partition_template::v1 as proto;
+ use std::sync::Arc;
+
+ #[derive(Debug, Clone, PartialEq)]
+ pub struct Wrapper(Arc<proto::PartitionTemplate>);
+
+ impl Wrapper {
+ /// Read access to the inner proto
+ pub fn inner(&self) -> &proto::PartitionTemplate {
+ &self.0
+ }
}
-}
-impl<'q, DB> sqlx::Encode<'q, DB> for SerializationWrapper
-where
- DB: sqlx::Database,
- for<'b> sqlx::types::Json<&'b proto::PartitionTemplate>: sqlx::Encode<'q, DB>,
-{
- fn encode_by_ref(
- &self,
- buf: &mut <DB as sqlx::database::HasArguments<'q>>::ArgumentBuffer,
- ) -> sqlx::encode::IsNull {
- <sqlx::types::Json<&proto::PartitionTemplate> as sqlx::Encode<'_, DB>>::encode_by_ref(
- &sqlx::types::Json(&self.0),
- buf,
- )
+ impl TryFrom<proto::PartitionTemplate> for Wrapper {
+ type Error = super::ValidationError;
+
+ fn try_from(partition_template: proto::PartitionTemplate) -> Result<Self, Self::Error> {
+ Ok(Self(Arc::new(partition_template)))
+ }
+ }
+
+ impl<DB> sqlx::Type<DB> for Wrapper
+ where
+ sqlx::types::Json<Self>: sqlx::Type<DB>,
+ DB: sqlx::Database,
+ {
+ fn type_info() -> DB::TypeInfo {
+ <sqlx::types::Json<Self> as sqlx::Type<DB>>::type_info()
+ }
}
-}
-impl<'q, DB> sqlx::Decode<'q, DB> for SerializationWrapper
-where
- DB: sqlx::Database,
- sqlx::types::Json<proto::PartitionTemplate>: sqlx::Decode<'q, DB>,
-{
- fn decode(
- value: <DB as sqlx::database::HasValueRef<'q>>::ValueRef,
- ) -> Result<Self, Box<dyn std::error::Error + 'static + Send + Sync>> {
- Ok(Self(
- <sqlx::types::Json<proto::PartitionTemplate> as sqlx::Decode<'_, DB>>::decode(value)?
+ impl<'q, DB> sqlx::Encode<'q, DB> for Wrapper
+ where
+ DB: sqlx::Database,
+ for<'b> sqlx::types::Json<&'b proto::PartitionTemplate>: sqlx::Encode<'q, DB>,
+ {
+ fn encode_by_ref(
+ &self,
+ buf: &mut <DB as sqlx::database::HasArguments<'q>>::ArgumentBuffer,
+ ) -> sqlx::encode::IsNull {
+ <sqlx::types::Json<&proto::PartitionTemplate> as sqlx::Encode<'_, DB>>::encode_by_ref(
+ &sqlx::types::Json(&self.0),
+ buf,
+ )
+ }
+ }
+
+ impl<'q, DB> sqlx::Decode<'q, DB> for Wrapper
+ where
+ DB: sqlx::Database,
+ sqlx::types::Json<proto::PartitionTemplate>: sqlx::Decode<'q, DB>,
+ {
+ fn decode(
+ value: <DB as sqlx::database::HasValueRef<'q>>::ValueRef,
+ ) -> Result<Self, Box<dyn std::error::Error + 'static + Send + Sync>> {
+ Ok(Self(
+ <sqlx::types::Json<proto::PartitionTemplate> as sqlx::Decode<'_, DB>>::decode(
+ value,
+ )?
.0
.into(),
- ))
+ ))
+ }
}
}
@@ -327,8 +368,8 @@ pub fn test_table_partition_override(
})
.collect();
- let proto = Arc::new(proto::PartitionTemplate { parts });
- TablePartitionTemplateOverride(Some(SerializationWrapper(proto)))
+ let proto = proto::PartitionTemplate { parts };
+ TablePartitionTemplateOverride(Some(serialization::Wrapper::try_from(proto).unwrap()))
}
#[cfg(test)]
@@ -472,7 +513,7 @@ mod tests {
#[test]
fn test_default_template_fixture() {
let ns = NamespacePartitionTemplateOverride::default();
- let table = TablePartitionTemplateOverride::new(None, &ns);
+ let table = TablePartitionTemplateOverride::try_new(None, &ns).unwrap();
let got = table.parts().collect::<Vec<_>>();
assert_matches!(got.as_slice(), [TemplatePart::TimeFormat("%Y-%m-%d")]);
}
@@ -480,12 +521,14 @@ mod tests {
#[test]
fn no_custom_table_template_specified_gets_namespace_template() {
let namespace_template =
- NamespacePartitionTemplateOverride::from(proto::PartitionTemplate {
+ NamespacePartitionTemplateOverride::try_from(proto::PartitionTemplate {
parts: vec![proto::TemplatePart {
part: Some(proto::template_part::Part::TimeFormat("year-%Y".into())),
}],
- });
- let table_template = TablePartitionTemplateOverride::new(None, &namespace_template);
+ })
+ .unwrap();
+ let table_template =
+ TablePartitionTemplateOverride::try_new(None, &namespace_template).unwrap();
assert_eq!(table_template.0, namespace_template.0);
}
@@ -498,17 +541,19 @@ mod tests {
}],
};
let namespace_template =
- NamespacePartitionTemplateOverride::from(proto::PartitionTemplate {
+ NamespacePartitionTemplateOverride::try_from(proto::PartitionTemplate {
parts: vec![proto::TemplatePart {
part: Some(proto::template_part::Part::TimeFormat("year-%Y".into())),
}],
- });
- let table_template = TablePartitionTemplateOverride::new(
+ })
+ .unwrap();
+ let table_template = TablePartitionTemplateOverride::try_new(
Some(custom_table_template.clone()),
&namespace_template,
- );
+ )
+ .unwrap();
- assert_eq!(table_template.0.unwrap().0.as_ref(), &custom_table_template);
+ assert_eq!(table_template.0.unwrap().inner(), &custom_table_template);
}
// The JSON representation of the partition template protobuf is stored in the database, so
@@ -532,7 +577,7 @@ mod tests {
{\"timeFormat\":\"year-%Y\"}\
]}";
- let namespace = NamespacePartitionTemplateOverride::from(custom_template);
+ let namespace = NamespacePartitionTemplateOverride::try_from(custom_template).unwrap();
let mut buf = Default::default();
let _ = <NamespacePartitionTemplateOverride as Encode<'_, sqlx::Sqlite>>::encode_by_ref(
&namespace, &mut buf,
@@ -550,7 +595,7 @@ mod tests {
let namespace_json_str: String = buf.iter().map(extract_sqlite_argument_text).collect();
assert_eq!(namespace_json_str, expected_json_str);
- let table = TablePartitionTemplateOverride::new(None, &namespace);
+ let table = TablePartitionTemplateOverride::try_new(None, &namespace).unwrap();
let mut buf = Default::default();
let _ = <TablePartitionTemplateOverride as Encode<'_, sqlx::Sqlite>>::encode_by_ref(
&table, &mut buf,
diff --git a/ingester_test_ctx/src/lib.rs b/ingester_test_ctx/src/lib.rs
index 82d69345ba..98837381de 100644
--- a/ingester_test_ctx/src/lib.rs
+++ b/ingester_test_ctx/src/lib.rs
@@ -252,7 +252,7 @@ where
.get_mut(&namespace_id)
.expect("namespace does not exist");
let partition_template =
- TablePartitionTemplateOverride::new(None, &schema.partition_template);
+ TablePartitionTemplateOverride::try_new(None, &schema.partition_template).unwrap();
let batches = lines_to_batches(lp, 0).unwrap();
diff --git a/iox_catalog/src/interface.rs b/iox_catalog/src/interface.rs
index 88f5f16828..627ba6758f 100644
--- a/iox_catalog/src/interface.rs
+++ b/iox_catalog/src/interface.rs
@@ -146,6 +146,11 @@ pub enum Error {
#[snafu(display("could not delete namespace: {source}"))]
CouldNotDeleteNamespace { source: sqlx::Error },
+
+ #[snafu(display("invalid partition template: {source}"))]
+ InvalidPartitionTemplate {
+ source: data_types::partition_template::ValidationError,
+ },
}
/// A specialized `Error` for Catalog errors
@@ -870,11 +875,12 @@ pub(crate) mod test_helpers {
// create a namespace with a PartitionTemplate other than the default
let tag_partition_template =
- NamespacePartitionTemplateOverride::from(proto::PartitionTemplate {
+ NamespacePartitionTemplateOverride::try_from(proto::PartitionTemplate {
parts: vec![proto::TemplatePart {
part: Some(proto::template_part::Part::TagValue("tag1".into())),
}],
- });
+ })
+ .unwrap();
let namespace5_name = NamespaceName::new("test_namespace5").unwrap();
let namespace5 = repos
.namespaces()
@@ -1097,7 +1103,8 @@ pub(crate) mod test_helpers {
.tables()
.create(
"test_table",
- TablePartitionTemplateOverride::new(None, &namespace.partition_template),
+ TablePartitionTemplateOverride::try_new(None, &namespace.partition_template)
+ .unwrap(),
namespace.id,
)
.await;
@@ -1192,7 +1199,7 @@ pub(crate) mod test_helpers {
.tables()
.create(
"definitely_unique",
- TablePartitionTemplateOverride::new(None, &latest.partition_template),
+ TablePartitionTemplateOverride::try_new(None, &latest.partition_template).unwrap(),
latest.id,
)
.await
@@ -1206,7 +1213,7 @@ pub(crate) mod test_helpers {
));
// Create a table with a partition template other than the default
- let custom_table_template = TablePartitionTemplateOverride::new(
+ let custom_table_template = TablePartitionTemplateOverride::try_new(
Some(proto::PartitionTemplate {
parts: vec![
proto::TemplatePart {
@@ -1221,8 +1228,8 @@ pub(crate) mod test_helpers {
],
}),
&namespace2.partition_template,
- );
-
+ )
+ .unwrap();
let templated = repos
.tables()
.create(
@@ -1256,7 +1263,7 @@ pub(crate) mod test_helpers {
// Create a namespace with a partition template other than the default
let custom_namespace_template =
- NamespacePartitionTemplateOverride::from(proto::PartitionTemplate {
+ NamespacePartitionTemplateOverride::try_from(proto::PartitionTemplate {
parts: vec![
proto::TemplatePart {
part: Some(proto::template_part::Part::TagValue("zzz".into())),
@@ -1268,7 +1275,8 @@ pub(crate) mod test_helpers {
part: Some(proto::template_part::Part::TimeFormat("year-%Y".into())),
},
],
- });
+ })
+ .unwrap();
let custom_namespace_name = NamespaceName::new("custom_namespace").unwrap();
let custom_namespace = repos
.namespaces()
@@ -1281,7 +1289,8 @@ pub(crate) mod test_helpers {
.unwrap();
// Create a table without specifying the partition template
let custom_table_template =
- TablePartitionTemplateOverride::new(None, &custom_namespace.partition_template);
+ TablePartitionTemplateOverride::try_new(None, &custom_namespace.partition_template)
+ .unwrap();
let table_templated_by_namespace = repos
.tables()
.create(
@@ -1293,7 +1302,7 @@ pub(crate) mod test_helpers {
.unwrap();
assert_eq!(
table_templated_by_namespace.partition_template,
- TablePartitionTemplateOverride::new(None, &custom_namespace_template)
+ TablePartitionTemplateOverride::try_new(None, &custom_namespace_template).unwrap()
);
// Tag columns should be created for tags used in the template
diff --git a/iox_catalog/src/lib.rs b/iox_catalog/src/lib.rs
index f47b2eaf4c..9430a37cf5 100644
--- a/iox_catalog/src/lib.rs
+++ b/iox_catalog/src/lib.rs
@@ -228,7 +228,8 @@ where
// This table is being created implicitly by this write, so there's no
// possibility of a user-supplied partition template here, which is why there's
// a hardcoded `None`.
- TablePartitionTemplateOverride::new(None, namespace_partition_template),
+ TablePartitionTemplateOverride::try_new(None, namespace_partition_template)
+ .map_err(|source| Error::InvalidPartitionTemplate { source })?,
namespace_id,
)
.await;
@@ -311,7 +312,8 @@ pub mod test_helpers {
.tables()
.create(
name,
- TablePartitionTemplateOverride::new(None, &namespace.partition_template),
+ TablePartitionTemplateOverride::try_new(None, &namespace.partition_template)
+ .unwrap(),
namespace.id,
)
.await
diff --git a/iox_catalog/src/postgres.rs b/iox_catalog/src/postgres.rs
index 9b5e3ab4ba..fb1a793b1e 100644
--- a/iox_catalog/src/postgres.rs
+++ b/iox_catalog/src/postgres.rs
@@ -2311,13 +2311,14 @@ RETURNING id, name, retention_period_ns, max_tables, max_columns_per_table, dele
// assume it's important that it's being specially requested and store it rather than NULL.
let namespace_custom_template_name = "kumquats";
let custom_partition_template_equal_to_default =
- NamespacePartitionTemplateOverride::from(proto::PartitionTemplate {
+ NamespacePartitionTemplateOverride::try_from(proto::PartitionTemplate {
parts: vec![proto::TemplatePart {
part: Some(proto::template_part::Part::TimeFormat(
"%Y-%m-%d".to_owned(),
)),
}],
- });
+ })
+ .unwrap();
let namespace_custom_template = repos
.namespaces()
.create(
@@ -2371,13 +2372,14 @@ RETURNING id, name, retention_period_ns, max_tables, max_columns_per_table, dele
.namespaces()
.create(
&namespace_custom_template_name.try_into().unwrap(),
- Some(NamespacePartitionTemplateOverride::from(
- proto::PartitionTemplate {
+ Some(
+ NamespacePartitionTemplateOverride::try_from(proto::PartitionTemplate {
parts: vec![proto::TemplatePart {
part: Some(proto::template_part::Part::TimeFormat("year-%Y".into())),
}],
- },
- )),
+ })
+ .unwrap(),
+ ),
None,
)
.await
@@ -2474,10 +2476,11 @@ RETURNING *;
.tables()
.create(
"pomelo",
- TablePartitionTemplateOverride::new(
+ TablePartitionTemplateOverride::try_new(
None, // no custom partition template
&namespace_custom_template.partition_template,
- ),
+ )
+ .unwrap(),
namespace_custom_template.id,
)
.await
@@ -2486,10 +2489,11 @@ RETURNING *;
// it should have the namespace's template
assert_eq!(
table_no_template_with_namespace_template.partition_template,
- TablePartitionTemplateOverride::new(
+ TablePartitionTemplateOverride::try_new(
None,
&namespace_custom_template.partition_template
)
+ .unwrap()
);
// and store that value in the database record.
@@ -2504,10 +2508,11 @@ RETURNING *;
record.try_get("partition_template").unwrap();
assert_eq!(
partition_template.unwrap(),
- TablePartitionTemplateOverride::new(
+ TablePartitionTemplateOverride::try_new(
None,
&namespace_custom_template.partition_template
)
+ .unwrap()
);
// # Table template true, namespace template false
@@ -2523,10 +2528,11 @@ RETURNING *;
.tables()
.create(
"tangerine",
- TablePartitionTemplateOverride::new(
+ TablePartitionTemplateOverride::try_new(
Some(custom_table_template), // with custom partition template
&namespace_default_template.partition_template,
- ),
+ )
+ .unwrap(),
namespace_default_template.id,
)
.await
@@ -2575,10 +2581,11 @@ RETURNING *;
.tables()
.create(
"nectarine",
- TablePartitionTemplateOverride::new(
+ TablePartitionTemplateOverride::try_new(
Some(custom_table_template), // with custom partition template
&namespace_custom_template.partition_template,
- ),
+ )
+ .unwrap(),
namespace_custom_template.id,
)
.await
@@ -2622,10 +2629,11 @@ RETURNING *;
.tables()
.create(
"grapefruit",
- TablePartitionTemplateOverride::new(
+ TablePartitionTemplateOverride::try_new(
None, // no custom partition template
&namespace_default_template.partition_template,
- ),
+ )
+ .unwrap(),
namespace_default_template.id,
)
.await
diff --git a/iox_catalog/src/sqlite.rs b/iox_catalog/src/sqlite.rs
index 9bf4038969..83238ac083 100644
--- a/iox_catalog/src/sqlite.rs
+++ b/iox_catalog/src/sqlite.rs
@@ -1915,13 +1915,14 @@ RETURNING id, name, retention_period_ns, max_tables, max_columns_per_table, dele
// assume it's important that it's being specially requested and store it rather than NULL.
let namespace_custom_template_name = "kumquats";
let custom_partition_template_equal_to_default =
- NamespacePartitionTemplateOverride::from(proto::PartitionTemplate {
+ NamespacePartitionTemplateOverride::try_from(proto::PartitionTemplate {
parts: vec![proto::TemplatePart {
part: Some(proto::template_part::Part::TimeFormat(
"%Y-%m-%d".to_owned(),
)),
}],
- });
+ })
+ .unwrap();
let namespace_custom_template = repos
.namespaces()
.create(
@@ -1973,13 +1974,14 @@ RETURNING id, name, retention_period_ns, max_tables, max_columns_per_table, dele
.namespaces()
.create(
&namespace_custom_template_name.try_into().unwrap(),
- Some(NamespacePartitionTemplateOverride::from(
- proto::PartitionTemplate {
+ Some(
+ NamespacePartitionTemplateOverride::try_from(proto::PartitionTemplate {
parts: vec![proto::TemplatePart {
part: Some(proto::template_part::Part::TimeFormat("year-%Y".into())),
}],
- },
- )),
+ })
+ .unwrap(),
+ ),
None,
)
.await
@@ -2076,10 +2078,11 @@ RETURNING *;
.tables()
.create(
"pomelo",
- TablePartitionTemplateOverride::new(
+ TablePartitionTemplateOverride::try_new(
None, // no custom partition template
&namespace_custom_template.partition_template,
- ),
+ )
+ .unwrap(),
namespace_custom_template.id,
)
.await
@@ -2088,10 +2091,11 @@ RETURNING *;
// it should have the namespace's template
assert_eq!(
table_no_template_with_namespace_template.partition_template,
- TablePartitionTemplateOverride::new(
+ TablePartitionTemplateOverride::try_new(
None,
&namespace_custom_template.partition_template
)
+ .unwrap()
);
// and store that value in the database record.
@@ -2106,10 +2110,11 @@ RETURNING *;
record.try_get("partition_template").unwrap();
assert_eq!(
partition_template.unwrap(),
- TablePartitionTemplateOverride::new(
+ TablePartitionTemplateOverride::try_new(
None,
&namespace_custom_template.partition_template
)
+ .unwrap()
);
// # Table template true, namespace template false
@@ -2125,10 +2130,11 @@ RETURNING *;
.tables()
.create(
"tangerine",
- TablePartitionTemplateOverride::new(
+ TablePartitionTemplateOverride::try_new(
Some(custom_table_template), // with custom partition template
&namespace_default_template.partition_template,
- ),
+ )
+ .unwrap(),
namespace_default_template.id,
)
.await
@@ -2177,10 +2183,11 @@ RETURNING *;
.tables()
.create(
"nectarine",
- TablePartitionTemplateOverride::new(
+ TablePartitionTemplateOverride::try_new(
Some(custom_table_template), // with custom partition template
&namespace_custom_template.partition_template,
- ),
+ )
+ .unwrap(),
namespace_custom_template.id,
)
.await
@@ -2224,10 +2231,11 @@ RETURNING *;
.tables()
.create(
"grapefruit",
- TablePartitionTemplateOverride::new(
+ TablePartitionTemplateOverride::try_new(
None, // no custom partition template
&namespace_default_template.partition_template,
- ),
+ )
+ .unwrap(),
namespace_default_template.id,
)
.await
diff --git a/mutable_batch/src/payload/partition.rs b/mutable_batch/src/payload/partition.rs
index 3319ea9ed5..24ca4763cf 100644
--- a/mutable_batch/src/payload/partition.rs
+++ b/mutable_batch/src/payload/partition.rs
@@ -231,7 +231,8 @@ mod tests {
.unwrap();
writer.commit();
- let template_parts = TablePartitionTemplateOverride::new(None, &Default::default());
+ let template_parts =
+ TablePartitionTemplateOverride::try_new(None, &Default::default()).unwrap();
let keys: Vec<_> = partition_keys(&batch, template_parts.parts())
.collect::<Result<Vec<_>, _>>()
.unwrap();
diff --git a/router/benches/partitioner.rs b/router/benches/partitioner.rs
index 3545581a82..9638b3143d 100644
--- a/router/benches/partitioner.rs
+++ b/router/benches/partitioner.rs
@@ -157,9 +157,11 @@ fn bench(
// Un-normalise the path, adjusting back to the crate root.
let file_path = format!("{}/../{}", env!("CARGO_MANIFEST_DIR"), file_path);
let path = Path::new(&file_path);
- let partition_template = NamespacePartitionTemplateOverride::from(proto::PartitionTemplate {
- parts: partition_template,
- });
+ let partition_template =
+ NamespacePartitionTemplateOverride::try_from(proto::PartitionTemplate {
+ parts: partition_template,
+ })
+ .unwrap();
let schema = Arc::new(NamespaceSchema {
id: NamespaceId::new(42),
@@ -187,7 +189,7 @@ fn bench(
TableId::new(i as _),
(
name,
- TablePartitionTemplateOverride::new(None, &partition_template),
+ TablePartitionTemplateOverride::try_new(None, &partition_template).unwrap(),
payload,
),
)
diff --git a/service_grpc_namespace/src/lib.rs b/service_grpc_namespace/src/lib.rs
index 1f0b026e4c..07ea7036f2 100644
--- a/service_grpc_namespace/src/lib.rs
+++ b/service_grpc_namespace/src/lib.rs
@@ -89,7 +89,10 @@ impl namespace_service_server::NamespaceService for NamespaceService {
.namespaces()
.create(
&namespace_name,
- partition_template.map(NamespacePartitionTemplateOverride::from),
+ partition_template
+ .map(NamespacePartitionTemplateOverride::try_from)
+ .transpose()
+ .map_err(|v| Status::invalid_argument(v.to_string()))?,
retention_period_ns,
)
.await
diff --git a/service_grpc_table/src/lib.rs b/service_grpc_table/src/lib.rs
index 6e22548363..524a1d12c1 100644
--- a/service_grpc_table/src/lib.rs
+++ b/service_grpc_table/src/lib.rs
@@ -81,10 +81,11 @@ impl table_service_server::TableService for TableService {
.tables()
.create(
&name,
- TablePartitionTemplateOverride::new(
+ TablePartitionTemplateOverride::try_new(
partition_template,
&namespace.partition_template,
- ),
+ )
+ .map_err(|v| Status::invalid_argument(v.to_string()))?,
namespace.id,
)
.await
@@ -312,8 +313,8 @@ mod tests {
.namespaces()
.create(
&namespace_name,
- Some(NamespacePartitionTemplateOverride::from(
- PartitionTemplate {
+ Some(
+ NamespacePartitionTemplateOverride::try_from(PartitionTemplate {
parts: vec![
TemplatePart {
part: Some(template_part::Part::TagValue("color".into())),
@@ -325,8 +326,9 @@ mod tests {
part: Some(template_part::Part::TimeFormat("%Y".into())),
},
],
- },
- )),
+ })
+ .unwrap(),
+ ),
None,
)
.await
|
c49c6159ef500c1bf102b943c945595efd0fb9d4
|
Marco Neumann
|
2023-09-13 17:25:38
|
change "normalization" in projected schema cache (#8720)
|
* refactor: "projected schema" cache inputs must be normalized
Normalizing under the hood and returning normalized schemas w/o the user
knowing about it is a good source for subtle bugs.
* refactor: do not normalize projected schema by name
Normalizing makes it harder to predict the output and potentially
requires additional string lookups just to work with the schema.
* fix: typos
Co-authored-by: Andrew Lamb <[email protected]>
Co-authored-by: Martin Hilton <[email protected]>
---------
|
Co-authored-by: Andrew Lamb <[email protected]>
Co-authored-by: Martin Hilton <[email protected]>
|
refactor: change "normalization" in projected schema cache (#8720)
* refactor: "projected schema" cache inputs must be normalized
Normalizing under the hood and returning normalized schemas w/o the user
knowing about it is a good source for subtle bugs.
* refactor: do not normalize projected schema by name
Normalizing makes it harder to predict the output and potentially
requires additional string lookups just to work with the schema.
* fix: typos
Co-authored-by: Andrew Lamb <[email protected]>
Co-authored-by: Martin Hilton <[email protected]>
---------
Co-authored-by: Andrew Lamb <[email protected]>
Co-authored-by: Martin Hilton <[email protected]>
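A minimal sketch of the new caller-side contract, assuming column ids are plain integers (the real key uses ColumnId): instead of sorting the projection internally, the cache asserts it is already strictly sorted, so an unnormalized key fails loudly rather than being silently rewritten.

// Illustrative only: the strictly-increasing check also rejects duplicates.
fn assert_projection_normalized(projection: &[i64]) {
    assert!(
        projection.windows(2).all(|w| w[0] < w[1]),
        "projection order not normalized: {projection:?}"
    );
}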
|
diff --git a/querier/src/cache/projected_schema.rs b/querier/src/cache/projected_schema.rs
index d77ab21ba7..750198b828 100644
--- a/querier/src/cache/projected_schema.rs
+++ b/querier/src/cache/projected_schema.rs
@@ -35,10 +35,14 @@ struct CacheKey {
impl CacheKey {
/// Create new key.
///
- /// This normalizes `projection`.
- fn new(table_id: TableId, mut projection: Box<[ColumnId]>) -> Self {
- // normalize column order
- projection.sort();
+ /// # Panics
+ /// Panics if projection sort order is not normalized.
+ fn new(table_id: TableId, projection: Box<[ColumnId]>) -> Self {
+ assert!(
+ projection.windows(2).all(|c| c[0] < c[1]),
+ "projection order not normalized: {:?}",
+ projection,
+ );
Self {
table_id,
@@ -79,7 +83,7 @@ impl ProjectedSchemaCache {
FunctionLoader::new(move |key: CacheKey, table: Arc<CachedTable>| async move {
assert_eq!(key.table_id, table.id);
- let mut projection: Vec<&str> = key
+ let projection: Vec<&str> = key
.projection
.iter()
.map(|id| {
@@ -91,9 +95,6 @@ impl ProjectedSchemaCache {
})
.collect();
- // order by name since IDs are rather arbitrary
- projection.sort();
-
table
.schema
.select_by_names(&projection)
@@ -131,13 +132,13 @@ impl ProjectedSchemaCache {
/// Get projected schema for given table.
///
/// # Key
- /// The cache will is `table_id` combined with `projection`. The projection order is normalized.
+ /// The cache will is `table_id` combined with `projection`. The given projection order must be normalized.
///
/// The `table_schema` is NOT part of the cache key. It is OK to update the table schema (i.e. add new columns)
/// between requests. The caller however MUST ensure that the `table_id` is correct.
///
/// # Panic
- /// Will panic if any column in `projection` is missing in `table_schema`.
+ /// Will panic if any column in `projection` is missing in `table_schema` or if the given projection is NOT sorted.
pub async fn get(
&self,
table: Arc<CachedTable>,
@@ -162,7 +163,7 @@ mod tests {
use super::*;
#[tokio::test]
- async fn test() {
+ async fn test_get() {
let cache = ProjectedSchemaCache::new(
Arc::new(SystemProvider::new()),
&metric::Registry::new(),
@@ -275,15 +276,12 @@ mod tests {
.await;
assert!(Arc::ptr_eq(projection_1.inner(), projection_3.inner()));
- // different column order
+ // subset
+ let expected = SchemaBuilder::new().tag("t1").build().unwrap();
let projection_4 = cache
- .get(
- Arc::clone(&table_1a),
- [ColumnId::new(2), ColumnId::new(1)].into(),
- None,
- )
+ .get(Arc::clone(&table_1a), [ColumnId::new(1)].into(), None)
.await;
- assert!(Arc::ptr_eq(projection_1.inner(), projection_4.inner()));
+ assert_eq!(projection_4, expected);
// different columns set
let expected = SchemaBuilder::new().tag("t1").tag("t3").build().unwrap();
@@ -316,6 +314,33 @@ mod tests {
)
.await;
assert!(Arc::ptr_eq(projection_1.inner(), projection_7.inner()));
+
+ //
+ }
+
+ #[tokio::test]
+ #[should_panic(expected = "projection order not normalized")]
+ async fn test_panic_projection_order_not_normalized() {
+ let cache = ProjectedSchemaCache::new(
+ Arc::new(SystemProvider::new()),
+ &metric::Registry::new(),
+ test_ram_pool(),
+ true,
+ );
+
+ let table = Arc::new(CachedTable {
+ id: TableId::new(1),
+ schema: SchemaBuilder::default().build().unwrap(),
+ column_id_map: HashMap::default(),
+ column_id_map_rev: HashMap::default(),
+ primary_key_column_ids: [].into(),
+ partition_template: TablePartitionTemplateOverride::default(),
+ });
+
+ // different column order
+ cache
+ .get(table, [ColumnId::new(2), ColumnId::new(1)].into(), None)
+ .await;
}
fn reverse_map<K, V>(map: &HashMap<K, V>) -> HashMap<V, K>
diff --git a/querier/src/parquet/mod.rs b/querier/src/parquet/mod.rs
index c3e2dafaa4..03e2e4eaba 100644
--- a/querier/src/parquet/mod.rs
+++ b/querier/src/parquet/mod.rs
@@ -296,9 +296,9 @@ pub mod tests {
fn assert_schema(chunk: &QuerierParquetChunk) {
let expected_schema = SchemaBuilder::new()
+ .tag("tag1")
.field("field_int", DataType::Int64)
.unwrap()
- .tag("tag1")
.timestamp()
.build()
.unwrap();
@@ -328,13 +328,13 @@ pub mod tests {
assert_batches_eq!(
&[
- "+-----------+------+-----------------------------+",
- "| field_int | tag1 | time |",
- "+-----------+------+-----------------------------+",
- "| 70 | UT | 1970-01-01T00:00:00.000020Z |",
- "| 10 | VT | 1970-01-01T00:00:00.000010Z |",
- "| 1000 | WA | 1970-01-01T00:00:00.000008Z |",
- "+-----------+------+-----------------------------+",
+ "+------+-----------+-----------------------------+",
+ "| tag1 | field_int | time |",
+ "+------+-----------+-----------------------------+",
+ "| UT | 70 | 1970-01-01T00:00:00.000020Z |",
+ "| VT | 10 | 1970-01-01T00:00:00.000010Z |",
+ "| WA | 1000 | 1970-01-01T00:00:00.000008Z |",
+ "+------+-----------+-----------------------------+",
],
&batches
);
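
The projected-schema change above replaces in-place sorting of the projection with an assertion that callers already pass a normalized (strictly ascending, duplicate-free) column list. A minimal standalone sketch of that invariant check, with an illustrative `is_normalized` helper and plain `u64` column IDs standing in for the cache's own types:

```rust
/// Returns true when `projection` is strictly ascending (sorted, no duplicates),
/// i.e. the normalized form the cache key now demands from its callers.
fn is_normalized(projection: &[u64]) -> bool {
    // `windows(2)` visits every adjacent pair; strict `<` rejects both
    // out-of-order and duplicated column IDs.
    projection.windows(2).all(|pair| pair[0] < pair[1])
}

fn main() {
    assert!(is_normalized(&[1, 2, 5])); // sorted and unique: accepted
    assert!(!is_normalized(&[2, 1])); // out of order: would trip the assert
    assert!(!is_normalized(&[1, 1, 2])); // duplicate: would trip the assert
    assert!(is_normalized(&[])); // empty or single-element slices are trivially normalized
}
```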
|
7cb1636c64fb377eee14f72d972f53e4ee115916
|
Dom Dwyer
|
2022-12-03 17:26:42
|
do not panic when writer disconnects
|
When the RPC write disconnects without waiting for completion, the WAL
panics as there is no longer a consumer of the "committed" ACK.
| null |
fix: do not panic when writer disconnects
When the RPC write disconnects without waiting for completion, the WAL
panics as there is no longer a consumer of the "committed" ACK.
|
diff --git a/wal/src/lib.rs b/wal/src/lib.rs
index 73c84ba9db..1ca56ab444 100644
--- a/wal/src/lib.rs
+++ b/wal/src/lib.rs
@@ -458,7 +458,8 @@ impl OpenSegmentFile {
match req {
Write(tx, data) => {
let x = open_write.write(&data).unwrap();
- tx.send(x).unwrap();
+ // Ignore send errors - the caller may have disconnected.
+ let _ = tx.send(x);
}
Rotate(tx, ()) => {
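
The fix above comes down to one channel property: once the requesting side drops its receiver, `send` returns an error instead of delivering the ACK, and unwrapping that error is what crashed the WAL actor. A small sketch of the pattern, using `std::sync::mpsc` as a stand-in for whatever channel type the WAL actually uses:

```rust
use std::sync::mpsc;

fn main() {
    let (tx, rx) = mpsc::channel::<u64>();

    // Simulate the RPC writer disconnecting before the "committed" ACK arrives.
    drop(rx);

    // With no receiver left, `send` reports an error; unwrapping it is what made
    // the WAL actor panic, so the fix discards the result instead.
    assert!(tx.send(42).is_err());
    let _ = tx.send(43); // the shape used in the fix: ignore a failed ACK delivery
}
```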
|
77cbc880f636391b6e15762aa00a958a4790fa14
|
Nga Tran
|
2022-12-01 16:23:44
|
Add cap limit on number of partitions to be compacted in parallel (#6305)
|
* feat: Add cap limit on number of partitions to be compacted in parallel
* chore: cleanup
* chore: clearer comments
| null |
feat: Add cap limit on number of partitions to be compacted in parallel (#6305)
* feat: Add cap limit on number of partitions to be compacted in parallel
* chore: cleanup
* chore: clearer comments
|
diff --git a/clap_blocks/src/compactor.rs b/clap_blocks/src/compactor.rs
index 7732c6b785..78eed150c8 100644
--- a/clap_blocks/src/compactor.rs
+++ b/clap_blocks/src/compactor.rs
@@ -208,6 +208,18 @@ macro_rules! gen_compactor_config {
action
)]
pub hot_compaction_hours_threshold_2: u64,
+
+ /// Max number of partitions that can be compacted in parallel at once
+ /// We use memory budget to estimate how many partitions can be compacted in parallel at once.
+ /// However, we do not want to have that number too large which will cause the high usage of CPU cores
+ /// and may also lead to inaccuracy of memory estimation. This number is to cap that.
+ #[clap(
+ long = "compaction-max-parallel-partitions",
+ env = "INFLUXDB_IOX_COMPACTION_MAX_PARALLEL_PARTITIONS",
+ default_value = "20",
+ action
+ )]
+ pub max_parallel_partitions: u64,
}
};
}
@@ -239,6 +251,7 @@ impl CompactorOnceConfig {
minutes_without_new_writes_to_be_cold: self.minutes_without_new_writes_to_be_cold,
hot_compaction_hours_threshold_1: self.hot_compaction_hours_threshold_1,
hot_compaction_hours_threshold_2: self.hot_compaction_hours_threshold_2,
+ max_parallel_partitions: self.max_parallel_partitions,
}
}
}
diff --git a/compactor/src/cold.rs b/compactor/src/cold.rs
index 7191e08e6e..30b1a5e47e 100644
--- a/compactor/src/cold.rs
+++ b/compactor/src/cold.rs
@@ -114,6 +114,7 @@ mod tests {
const DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1: u64 = 4;
const DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2: u64 = 24;
+ const DEFAULT_MAX_PARALLEL_PARTITIONS: u64 = 20;
#[tokio::test]
async fn test_compact_remaining_level_0_files_many_files() {
@@ -710,6 +711,7 @@ mod tests {
minutes_without_new_writes_to_be_cold: 10,
hot_compaction_hours_threshold_1: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1,
hot_compaction_hours_threshold_2: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2,
+ max_parallel_partitions: DEFAULT_MAX_PARALLEL_PARTITIONS,
}
}
diff --git a/compactor/src/compact.rs b/compactor/src/compact.rs
index 5fb9d49245..cdad6bbe78 100644
--- a/compactor/src/compact.rs
+++ b/compactor/src/compact.rs
@@ -562,6 +562,7 @@ pub mod tests {
const DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1: u64 = 4;
const DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2: u64 = 24;
+ const DEFAULT_MAX_PARALLEL_PARTITIONS: u64 = 20;
impl PartitionCompactionCandidateWithInfo {
pub(crate) async fn from_test_partition(test_partition: &TestPartition) -> Self {
@@ -697,6 +698,7 @@ pub mod tests {
minutes_without_new_writes_to_be_cold: 10,
hot_compaction_hours_threshold_1: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1,
hot_compaction_hours_threshold_2: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2,
+ max_parallel_partitions: DEFAULT_MAX_PARALLEL_PARTITIONS,
}
}
diff --git a/compactor/src/handler.rs b/compactor/src/handler.rs
index 2d5b4f1cc3..e51940ee59 100644
--- a/compactor/src/handler.rs
+++ b/compactor/src/handler.rs
@@ -173,6 +173,12 @@ pub struct CompactorConfig {
/// When querying for partitions with data for hot compaction, how many hours to look
/// back for a second pass if we found nothing in the first pass.
pub hot_compaction_hours_threshold_2: u64,
+
+ /// Max number of partitions that can be compacted in parallel at once
+ /// We use memory budget to estimate how many partitions can be compacted in parallel at once.
+ /// However, we do not want to have that number too large which will cause the high usage of CPU cores
+ /// and may also lead to inaccuracy of memory estimation. This number is to cap that.
+ pub max_parallel_partitions: u64,
}
/// How long to pause before checking for more work again if there was
diff --git a/compactor/src/hot.rs b/compactor/src/hot.rs
index af4cfbfd4a..447a841e58 100644
--- a/compactor/src/hot.rs
+++ b/compactor/src/hot.rs
@@ -224,6 +224,7 @@ mod tests {
const DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1: u64 = 4;
const DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2: u64 = 24;
+ const DEFAULT_MAX_PARALLEL_PARTITIONS: u64 = 20;
struct TestSetup {
catalog: Arc<TestCatalog>,
@@ -544,6 +545,7 @@ mod tests {
minutes_without_new_writes_to_be_cold: 10,
hot_compaction_hours_threshold_1: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1,
hot_compaction_hours_threshold_2: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2,
+ max_parallel_partitions: DEFAULT_MAX_PARALLEL_PARTITIONS,
};
let compactor = Arc::new(Compactor::new(
vec![shard1.shard.id, shard2.shard.id],
diff --git a/compactor/src/lib.rs b/compactor/src/lib.rs
index b867b3ce87..d44de50bf2 100644
--- a/compactor/src/lib.rs
+++ b/compactor/src/lib.rs
@@ -238,18 +238,22 @@ async fn compact_candidates_with_memory_budget<C, Fut>(
}
// --------------------------------------------------------------------
- // 4. Almost hitting max budget (only 10% left)
- // OR no more candidates
- // OR already considered all remaining candidates.
+ // 4. Compact the candidates in parallel_compacting_candidates if one of these conditions hits:
+ // . candidates in parallel_compacting_candidates consume almost all the budget
+ // . no more candidates
+ // . already considered all remaining candidates.
+ // . hit the max number of partitions to compact in parallel
if (!parallel_compacting_candidates.is_empty())
&& ((remaining_budget_bytes <= (compactor.config.memory_budget_bytes / 10) as u64)
|| (candidates.is_empty())
- || (count == num_remaining_candidates))
+ || (count == num_remaining_candidates)
+ || (count as u64 == compactor.config.max_parallel_partitions))
{
debug!(
num_parallel_compacting_candidates = parallel_compacting_candidates.len(),
total_needed_memory_budget_bytes =
compactor.config.memory_budget_bytes - remaining_budget_bytes,
+ config_max_parallel_partitions = compactor.config.max_parallel_partitions,
compaction_type,
"parallel compacting candidate"
);
@@ -452,7 +456,9 @@ pub mod tests {
use arrow_util::assert_batches_sorted_eq;
use backoff::BackoffConfig;
use data_types::{ColumnType, CompactionLevel, ParquetFileId};
- use iox_tests::util::{TestCatalog, TestParquetFileBuilder, TestShard, TestTable};
+ use iox_tests::util::{
+ TestCatalog, TestParquetFileBuilder, TestPartition, TestShard, TestTable,
+ };
use iox_time::{SystemProvider, TimeProvider};
use std::{
collections::{HashMap, VecDeque},
@@ -462,6 +468,7 @@ pub mod tests {
const DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1: u64 = 4;
const DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2: u64 = 24;
+ const DEFAULT_MAX_PARALLEL_PARTITIONS: u64 = 20;
// In tests that are verifying successful compaction not affected by the memory budget, this
// converts a `parquet_file_filtering::FilteredFiles` that has a `filter_result` of
@@ -500,7 +507,7 @@ pub mod tests {
compactor,
mock_compactor,
..
- } = test_setup(14350).await;
+ } = test_setup(14350, 20).await;
let sorted_candidates = VecDeque::new();
@@ -563,7 +570,7 @@ pub mod tests {
}
}
- fn make_compactor_config(budget: u64) -> CompactorConfig {
+ fn make_compactor_config(budget: u64, max_parallel_jobs: u64) -> CompactorConfig {
// All numbers in here are chosen carefully for many tests.
// Change them will break the tests
CompactorConfig {
@@ -580,6 +587,7 @@ pub mod tests {
minutes_without_new_writes_to_be_cold: 10,
hot_compaction_hours_threshold_1: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1,
hot_compaction_hours_threshold_2: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2,
+ max_parallel_partitions: max_parallel_jobs,
}
}
@@ -591,10 +599,10 @@ pub mod tests {
}
pub(crate) async fn test_setup_with_default_budget() -> TestSetup {
- test_setup(14350).await
+ test_setup(14350, 20).await
}
- pub(crate) async fn test_setup(budget: u64) -> TestSetup {
+ pub(crate) async fn test_setup(budget: u64, max_parallel_jobs: u64) -> TestSetup {
let catalog = TestCatalog::new();
let namespace = catalog
.create_namespace_1hr_retention("namespace_hot_partitions_to_compact")
@@ -617,7 +625,7 @@ pub mod tests {
// Create a compactor
let time_provider = Arc::new(SystemProvider::new());
- let config = make_compactor_config(budget);
+ let config = make_compactor_config(budget, max_parallel_jobs);
let compactor = Arc::new(Compactor::new(
vec![shard.shard.id],
Arc::clone(&catalog.catalog),
@@ -640,161 +648,11 @@ pub mod tests {
}
#[tokio::test]
- async fn test_hot_compact_candidates_with_memory_budget() {
+ async fn test_hot_compact_candidates_with_limit_memory_budget() {
test_helpers::maybe_start_logging();
- let TestSetup {
- compactor,
- mock_compactor,
- shard,
- table,
- ..
- } = test_setup(14350).await;
-
- // Some times in the past to set to created_at of the files
- let hot_time_one_hour_ago = compactor.time_provider.hours_ago(1);
-
- // P1:
- // L0 2 rows. bytes: 2,250
- // L1 2 rows. bytes: 2,250
- // total = 2,250 + 2,250 = 4,500
- let partition1 = table.with_shard(&shard).create_partition("one").await;
-
- // 2 files with IDs 1 and 2
- let pf1_1 = TestParquetFileBuilder::default()
- .with_min_time(1)
- .with_max_time(5)
- .with_row_count(2)
- .with_compaction_level(CompactionLevel::Initial)
- .with_creation_time(hot_time_one_hour_ago);
- partition1.create_parquet_file_catalog_record(pf1_1).await;
-
- let pf1_2 = TestParquetFileBuilder::default()
- .with_min_time(4) // overlapped with pf1_1
- .with_max_time(6)
- .with_row_count(2)
- .with_compaction_level(CompactionLevel::FileNonOverlapped)
- .with_creation_time(hot_time_one_hour_ago);
- partition1.create_parquet_file_catalog_record(pf1_2).await;
-
- // P2:
- // L0 2 rows. bytes: 2,250
- // L1 2 rows. bytes: 2,250
- // total = 2,250 + 2,250 = 4,500
- let partition2 = table.with_shard(&shard).create_partition("two").await;
-
- // 2 files with IDs 3 and 4
- let pf2_1 = TestParquetFileBuilder::default()
- .with_min_time(1)
- .with_max_time(5)
- .with_row_count(2)
- .with_compaction_level(CompactionLevel::Initial)
- .with_creation_time(hot_time_one_hour_ago);
- partition2.create_parquet_file_catalog_record(pf2_1).await;
-
- let pf2_2 = TestParquetFileBuilder::default()
- .with_min_time(4) // overlapped with pf2_1
- .with_max_time(6)
- .with_row_count(2)
- .with_compaction_level(CompactionLevel::FileNonOverlapped)
- .with_creation_time(hot_time_one_hour_ago);
- partition2.create_parquet_file_catalog_record(pf2_2).await;
-
- // P3: bytes >= 90% of full budget = 90% * 14,350 = 12,915
- // L0 40 rows. bytes: 2,250
- // Five L1s. bytes: 2,250 each
- // total = 2,250 * 6 = 13,500
- let partition3 = table.with_shard(&shard).create_partition("three").await;
-
- // 6 files with IDs 5, 6, 7, 8, 9, 10
- let pf3_1 = TestParquetFileBuilder::default()
- .with_min_time(1)
- .with_max_time(6)
- .with_row_count(40)
- .with_compaction_level(CompactionLevel::Initial)
- .with_creation_time(hot_time_one_hour_ago);
- partition3.create_parquet_file_catalog_record(pf3_1).await;
-
- // Five overlapped L1 files
- for i in 1..6 {
- let pf3_i = TestParquetFileBuilder::default()
- .with_min_time(i) // overlapped with pf3_1
- .with_max_time(i)
- .with_row_count(24)
- .with_compaction_level(CompactionLevel::FileNonOverlapped)
- .with_creation_time(hot_time_one_hour_ago);
- partition3.create_parquet_file_catalog_record(pf3_i).await;
- }
-
- // P4: Over the full budget
- // L0 40 rows. bytes: 2,250
- // Six L1s. bytes: 2,250 each
- // total = 2,250 * 7 = 15,750 > 14350
- let partition4 = table.with_shard(&shard).create_partition("four").await;
-
- // 7 files with IDs 11, 12, 13, 14, 15, 16, 17
- let pf4_1 = TestParquetFileBuilder::default()
- .with_min_time(1)
- .with_max_time(7)
- .with_row_count(70)
- .with_compaction_level(CompactionLevel::Initial)
- .with_creation_time(hot_time_one_hour_ago);
- partition4.create_parquet_file_catalog_record(pf4_1).await;
-
- // Six overlapped L1 files
- for i in 1..7 {
- let pf4_i = TestParquetFileBuilder::default()
- .with_min_time(i) // overlapped with pf4_1
- .with_max_time(i)
- .with_row_count(40)
- .with_compaction_level(CompactionLevel::FileNonOverlapped)
- .with_creation_time(hot_time_one_hour_ago);
- partition4.create_parquet_file_catalog_record(pf4_i).await;
- }
-
- // P5:
- // L0 2 rows. bytes: 2,250
- // L1 2 rows. bytes: 2,250
- // total = 2,250 + 2,250 = 4,500
- let partition5 = table.with_shard(&shard).create_partition("five").await;
- // 2 files with IDs 18, 19
- let pf5_1 = TestParquetFileBuilder::default()
- .with_min_time(1)
- .with_max_time(5)
- .with_row_count(2)
- .with_compaction_level(CompactionLevel::Initial)
- .with_creation_time(hot_time_one_hour_ago);
- partition5.create_parquet_file_catalog_record(pf5_1).await;
-
- let pf5_2 = TestParquetFileBuilder::default()
- .with_min_time(4) // overlapped with pf5_1
- .with_max_time(6)
- .with_row_count(2)
- .with_compaction_level(CompactionLevel::FileNonOverlapped)
- .with_creation_time(hot_time_one_hour_ago);
- partition5.create_parquet_file_catalog_record(pf5_2).await;
-
- // P6:
- // L0 2 rows. bytes: 2,250
- // L1 2 rows. bytes: 2,250
- // total = 2,250 + 2,250 = 4,500
- let partition6 = table.with_shard(&shard).create_partition("six").await;
- // 2 files with IDs 20, 21
- let pf6_1 = TestParquetFileBuilder::default()
- .with_min_time(1)
- .with_max_time(5)
- .with_row_count(2)
- .with_compaction_level(CompactionLevel::Initial)
- .with_creation_time(hot_time_one_hour_ago);
- partition6.create_parquet_file_catalog_record(pf6_1).await;
-
- let pf6_2 = TestParquetFileBuilder::default()
- .with_min_time(4) // overlapped with pf6_1
- .with_max_time(6)
- .with_row_count(2)
- .with_compaction_level(CompactionLevel::FileNonOverlapped)
- .with_creation_time(hot_time_one_hour_ago);
- partition6.create_parquet_file_catalog_record(pf6_2).await;
+ // test setup with limited memory budget, 14350, and very large (aka unlimited in this test) max_parallel_jobs, 200
+ let (compactor, mock_compactor, partitions) = make_6_partitions(14350, 200).await;
// partition candidates: partitions with L0 and overlapped L1
let mut candidates = hot::hot_partitions_to_compact(Arc::clone(&compactor))
@@ -838,19 +696,19 @@ pub mod tests {
assert_eq!(group1.len(), 3);
let g1_candidate1 = &group1[0];
- assert_eq!(g1_candidate1.partition.id(), partition1.partition.id);
+ assert_eq!(g1_candidate1.partition.id(), partitions[0].partition.id);
let g1_candidate1_pf_ids: Vec<_> =
g1_candidate1.files.iter().map(|pf| pf.id().get()).collect();
assert_eq!(g1_candidate1_pf_ids, vec![2, 1]);
let g1_candidate2 = &group1[1];
- assert_eq!(g1_candidate2.partition.id(), partition2.partition.id);
+ assert_eq!(g1_candidate2.partition.id(), partitions[1].partition.id);
let g1_candidate2_pf_ids: Vec<_> =
g1_candidate2.files.iter().map(|pf| pf.id().get()).collect();
assert_eq!(g1_candidate2_pf_ids, vec![4, 3]);
let g1_candidate3 = &group1[2];
- assert_eq!(g1_candidate3.partition.id(), partition5.partition.id);
+ assert_eq!(g1_candidate3.partition.id(), partitions[4].partition.id);
let g1_candidate3_pf_ids: Vec<_> =
g1_candidate3.files.iter().map(|pf| pf.id().get()).collect();
assert_eq!(g1_candidate3_pf_ids, vec![19, 18]);
@@ -860,7 +718,7 @@ pub mod tests {
assert_eq!(group2.len(), 1);
let g2_candidate1 = &group2[0];
- assert_eq!(g2_candidate1.partition.id(), partition6.partition.id);
+ assert_eq!(g2_candidate1.partition.id(), partitions[5].partition.id);
let g2_candidate1_pf_ids: Vec<_> =
g2_candidate1.files.iter().map(|pf| pf.id().get()).collect();
assert_eq!(g2_candidate1_pf_ids, vec![21, 20]);
@@ -870,21 +728,99 @@ pub mod tests {
assert_eq!(group3.len(), 1);
let g3_candidate1 = &group3[0];
- assert_eq!(g3_candidate1.partition.id(), partition3.partition.id);
+ assert_eq!(g3_candidate1.partition.id(), partitions[2].partition.id);
let g3_candidate1_pf_ids: Vec<_> =
g3_candidate1.files.iter().map(|pf| pf.id().get()).collect();
- // all IDs of level-1 firts then level-0
+ // all IDs of level-1 first then level-0
assert_eq!(g3_candidate1_pf_ids, vec![6, 7, 8, 9, 10, 5]);
{
let mut repos = compactor.catalog.repositories().await;
let skipped_compactions = repos.partitions().list_skipped_compactions().await.unwrap();
assert_eq!(skipped_compactions.len(), 1);
- assert_eq!(skipped_compactions[0].partition_id, partition4.partition.id);
+ assert_eq!(
+ skipped_compactions[0].partition_id,
+ partitions[3].partition.id
+ );
assert_eq!(skipped_compactions[0].reason, "over memory budget");
}
}
+ #[tokio::test]
+ async fn test_hot_compact_candidates_with_limit_parallel_jobs() {
+ test_helpers::maybe_start_logging();
+
+ // test setup with plenty of memory budget 1GB (aka unlimited) but limit to 2 parallel jobs
+ let (compactor, mock_compactor, partitions) =
+ make_6_partitions(1024 * 1024 * 1024, 2).await;
+
+ // partition candidates: partitions with L0 and overlapped L1
+ let mut candidates = hot::hot_partitions_to_compact(Arc::clone(&compactor))
+ .await
+ .unwrap();
+ assert_eq!(candidates.len(), 6);
+ candidates.sort_by_key(|c| c.candidate.partition_id);
+ {
+ let mut repos = compactor.catalog.repositories().await;
+ let skipped_compactions = repos.partitions().list_skipped_compactions().await.unwrap();
+ assert!(
+ skipped_compactions.is_empty(),
+ "Expected no skipped compactions, got: {skipped_compactions:?}"
+ );
+ }
+
+ // There are 3 rounds of parallel compaction:
+ //
+ // * Round 1: 2 candidates [P1, P2]
+ // * Round 2: 2 candidates [P3, P4]
+ // * Round 3: 2 candidates [P5, P6]
+
+ compact_candidates_with_memory_budget(
+ Arc::clone(&compactor),
+ "hot",
+ CompactionLevel::Initial,
+ mock_compactor.compaction_function(),
+ true,
+ candidates.into(),
+ )
+ .await;
+
+ let compaction_groups = mock_compactor.results();
+
+ // 3 rounds of parallel compaction
+ assert_eq!(compaction_groups.len(), 3);
+
+ // Round 1
+ let group1 = &compaction_groups[0];
+ assert_eq!(group1.len(), 2);
+
+ let g1_candidate1 = &group1[0];
+ assert_eq!(g1_candidate1.partition.id(), partitions[0].partition.id);
+
+ let g1_candidate2 = &group1[1];
+ assert_eq!(g1_candidate2.partition.id(), partitions[1].partition.id);
+
+ // Round 2
+ let group2 = &compaction_groups[1];
+ assert_eq!(group2.len(), 2);
+
+ let g2_candidate1 = &group2[0];
+ assert_eq!(g2_candidate1.partition.id(), partitions[2].partition.id);
+
+ let g2_candidate2 = &group2[1];
+ assert_eq!(g2_candidate2.partition.id(), partitions[3].partition.id);
+
+ // Round 3
+ let group3 = &compaction_groups[2];
+ assert_eq!(group3.len(), 2);
+
+ let g3_candidate1 = &group3[0];
+ assert_eq!(g3_candidate1.partition.id(), partitions[4].partition.id);
+
+ let g3_candidate2 = &group3[1];
+ assert_eq!(g3_candidate2.partition.id(), partitions[5].partition.id);
+ }
+
// A quite sophisticated integration test of compacting one hot partition
// Beside lp data, every value min/max sequence numbers and min/max time are important
// to have a combination of needed tests in this test function
@@ -962,6 +898,7 @@ pub mod tests {
minutes_without_new_writes_to_be_cold: 10,
hot_compaction_hours_threshold_1: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1,
hot_compaction_hours_threshold_2: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2,
+ max_parallel_partitions: DEFAULT_MAX_PARALLEL_PARTITIONS,
};
let metrics = Arc::new(metric::Registry::new());
@@ -1153,4 +1090,172 @@ pub mod tests {
&batches
);
}
+
+ async fn make_6_partitions(
+ budget: u64,
+ max_parallel_jobs: u64,
+ ) -> (Arc<Compactor>, MockCompactor, Vec<Arc<TestPartition>>) {
+ let TestSetup {
+ compactor,
+ mock_compactor,
+ shard,
+ table,
+ ..
+ } = test_setup(budget, max_parallel_jobs).await;
+
+ // Some times in the past to set to created_at of the files
+ let hot_time_one_hour_ago = compactor.time_provider.hours_ago(1);
+
+ let mut partitions = Vec::with_capacity(6);
+
+ // P1:
+ // L0 2 rows. bytes: 2,250
+ // L1 2 rows. bytes: 2,250
+ // total = 2,250 + 2,250 = 4,500
+ let partition1 = table.with_shard(&shard).create_partition("one").await;
+
+ // 2 files with IDs 1 and 2
+ let pf1_1 = TestParquetFileBuilder::default()
+ .with_min_time(1)
+ .with_max_time(5)
+ .with_row_count(2)
+ .with_compaction_level(CompactionLevel::Initial)
+ .with_creation_time(hot_time_one_hour_ago);
+ partition1.create_parquet_file_catalog_record(pf1_1).await;
+
+ let pf1_2 = TestParquetFileBuilder::default()
+ .with_min_time(4) // overlapped with pf1_1
+ .with_max_time(6)
+ .with_row_count(2)
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .with_creation_time(hot_time_one_hour_ago);
+ partition1.create_parquet_file_catalog_record(pf1_2).await;
+ partitions.push(partition1);
+
+ // P2:
+ // L0 2 rows. bytes: 2,250
+ // L1 2 rows. bytes: 2,250
+ // total = 2,250 + 2,250 = 4,500
+ let partition2 = table.with_shard(&shard).create_partition("two").await;
+
+ // 2 files with IDs 3 and 4
+ let pf2_1 = TestParquetFileBuilder::default()
+ .with_min_time(1)
+ .with_max_time(5)
+ .with_row_count(2)
+ .with_compaction_level(CompactionLevel::Initial)
+ .with_creation_time(hot_time_one_hour_ago);
+ partition2.create_parquet_file_catalog_record(pf2_1).await;
+
+ let pf2_2 = TestParquetFileBuilder::default()
+ .with_min_time(4) // overlapped with pf2_1
+ .with_max_time(6)
+ .with_row_count(2)
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .with_creation_time(hot_time_one_hour_ago);
+ partition2.create_parquet_file_catalog_record(pf2_2).await;
+ partitions.push(partition2);
+
+ // P3: bytes >= 90% of full budget = 90% * 14,350 = 12,915
+ // L0 40 rows. bytes: 2,250
+ // Five L1s. bytes: 2,250 each
+ // total = 2,250 * 6 = 13,500
+ let partition3 = table.with_shard(&shard).create_partition("three").await;
+
+ // 6 files with IDs 5, 6, 7, 8, 9, 10
+ let pf3_1 = TestParquetFileBuilder::default()
+ .with_min_time(1)
+ .with_max_time(6)
+ .with_row_count(40)
+ .with_compaction_level(CompactionLevel::Initial)
+ .with_creation_time(hot_time_one_hour_ago);
+ partition3.create_parquet_file_catalog_record(pf3_1).await;
+
+ // Five overlapped L1 files
+ for i in 1..6 {
+ let pf3_i = TestParquetFileBuilder::default()
+ .with_min_time(i) // overlapped with pf3_1
+ .with_max_time(i)
+ .with_row_count(24)
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .with_creation_time(hot_time_one_hour_ago);
+ partition3.create_parquet_file_catalog_record(pf3_i).await;
+ }
+ partitions.push(partition3);
+
+ // P4: Over the full budget
+ // L0 40 rows. bytes: 2,250
+ // Six L1s. bytes: 2,250 each
+ // total = 2,250 * 7 = 15,750 > 14350
+ let partition4 = table.with_shard(&shard).create_partition("four").await;
+
+ // 7 files with IDs 11, 12, 13, 14, 15, 16, 17
+ let pf4_1 = TestParquetFileBuilder::default()
+ .with_min_time(1)
+ .with_max_time(7)
+ .with_row_count(70)
+ .with_compaction_level(CompactionLevel::Initial)
+ .with_creation_time(hot_time_one_hour_ago);
+ partition4.create_parquet_file_catalog_record(pf4_1).await;
+
+ // Six overlapped L1 files
+ for i in 1..7 {
+ let pf4_i = TestParquetFileBuilder::default()
+ .with_min_time(i) // overlapped with pf4_1
+ .with_max_time(i)
+ .with_row_count(40)
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .with_creation_time(hot_time_one_hour_ago);
+ partition4.create_parquet_file_catalog_record(pf4_i).await;
+ }
+ partitions.push(partition4);
+
+ // P5:
+ // L0 2 rows. bytes: 2,250
+ // L1 2 rows. bytes: 2,250
+ // total = 2,250 + 2,250 = 4,500
+ let partition5 = table.with_shard(&shard).create_partition("five").await;
+ // 2 files with IDs 18, 19
+ let pf5_1 = TestParquetFileBuilder::default()
+ .with_min_time(1)
+ .with_max_time(5)
+ .with_row_count(2)
+ .with_compaction_level(CompactionLevel::Initial)
+ .with_creation_time(hot_time_one_hour_ago);
+ partition5.create_parquet_file_catalog_record(pf5_1).await;
+
+ let pf5_2 = TestParquetFileBuilder::default()
+ .with_min_time(4) // overlapped with pf5_1
+ .with_max_time(6)
+ .with_row_count(2)
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .with_creation_time(hot_time_one_hour_ago);
+ partition5.create_parquet_file_catalog_record(pf5_2).await;
+ partitions.push(partition5);
+
+ // P6:
+ // L0 2 rows. bytes: 2,250
+ // L1 2 rows. bytes: 2,250
+ // total = 2,250 + 2,250 = 4,500
+ let partition6 = table.with_shard(&shard).create_partition("six").await;
+ // 2 files with IDs 20, 21
+ let pf6_1 = TestParquetFileBuilder::default()
+ .with_min_time(1)
+ .with_max_time(5)
+ .with_row_count(2)
+ .with_compaction_level(CompactionLevel::Initial)
+ .with_creation_time(hot_time_one_hour_ago);
+ partition6.create_parquet_file_catalog_record(pf6_1).await;
+
+ let pf6_2 = TestParquetFileBuilder::default()
+ .with_min_time(4) // overlapped with pf6_1
+ .with_max_time(6)
+ .with_row_count(2)
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .with_creation_time(hot_time_one_hour_ago);
+ partition6.create_parquet_file_catalog_record(pf6_2).await;
+ partitions.push(partition6);
+
+ (compactor, mock_compactor, partitions)
+ }
}
diff --git a/influxdb_iox/src/commands/run/all_in_one.rs b/influxdb_iox/src/commands/run/all_in_one.rs
index 00adedb793..a554b29400 100644
--- a/influxdb_iox/src/commands/run/all_in_one.rs
+++ b/influxdb_iox/src/commands/run/all_in_one.rs
@@ -439,6 +439,7 @@ impl Config {
minutes_without_new_writes_to_be_cold: 10,
hot_compaction_hours_threshold_1: 4,
hot_compaction_hours_threshold_2: 24,
+ max_parallel_partitions: 20,
};
let querier_config = QuerierConfig {
diff --git a/ioxd_compactor/src/lib.rs b/ioxd_compactor/src/lib.rs
index 7dd7525d4e..3601d3401f 100644
--- a/ioxd_compactor/src/lib.rs
+++ b/ioxd_compactor/src/lib.rs
@@ -214,6 +214,7 @@ pub async fn build_compactor_from_config(
minutes_without_new_writes_to_be_cold,
hot_compaction_hours_threshold_1,
hot_compaction_hours_threshold_2,
+ max_parallel_partitions,
..
} = compactor_config;
@@ -231,6 +232,7 @@ pub async fn build_compactor_from_config(
minutes_without_new_writes_to_be_cold,
hot_compaction_hours_threshold_1,
hot_compaction_hours_threshold_2,
+ max_parallel_partitions,
};
Ok(compactor::compact::Compactor::new(
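
A rough sketch of the batching rule the new `max_parallel_partitions` knob enforces in `compact_candidates_with_memory_budget`: a group of candidates is dispatched once it would consume most of the memory budget, once the candidate list is exhausted, or once the group reaches the parallelism cap. The `Config` struct and `plan_batches` helper below are illustrative stand-ins, not the compactor's real types, and the over-budget/skipped-partition bookkeeping is omitted:

```rust
/// Illustrative stand-ins for the compactor's config and candidate estimates.
struct Config {
    memory_budget_bytes: u64,
    max_parallel_partitions: u64,
}

/// Groups candidates (each annotated with its estimated memory need, in bytes)
/// into batches that respect both the memory budget and the parallelism cap.
fn plan_batches(candidates: &[u64], config: &Config) -> Vec<Vec<u64>> {
    let mut batches = Vec::new();
    let mut current = Vec::new();
    let mut remaining = config.memory_budget_bytes;

    for (i, &needed) in candidates.iter().enumerate() {
        if needed <= remaining {
            remaining -= needed;
            current.push(needed);
        }
        let no_more_candidates = i + 1 == candidates.len();
        let budget_nearly_gone = remaining <= config.memory_budget_bytes / 10;
        let hit_parallel_cap = current.len() as u64 == config.max_parallel_partitions;
        if !current.is_empty() && (budget_nearly_gone || no_more_candidates || hit_parallel_cap) {
            batches.push(std::mem::take(&mut current));
            remaining = config.memory_budget_bytes;
        }
    }
    batches
}

fn main() {
    let config = Config {
        memory_budget_bytes: 1_000_000,
        max_parallel_partitions: 2,
    };
    // With an effectively unlimited budget, only the parallelism cap splits the work.
    let batches = plan_batches(&[4_500, 4_500, 13_500, 15_750, 4_500, 4_500], &config);
    assert_eq!(batches.iter().map(|b| b.len()).collect::<Vec<_>>(), vec![2, 2, 2]);
}
```

With six candidates and a cap of two, the sketch yields three batches of two, the same shape the new `test_hot_compact_candidates_with_limit_parallel_jobs` test asserts.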
|
1400bf99e49bea291bf08be15b2539631deac40c
|
Nga Tran
|
2022-10-12 12:59:51
|
split memory estimation into bytes to store and bytes to stream (#5845)
|
* refactor: split memory estimation into bytes to store and bytes to stream
* chore: cleanup
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
refactor: split memory estimation into bytes to store and bytes to stream (#5845)
* refactor: split memory estimation into bytes to store and bytes to stream
* chore: cleanup
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/compactor/src/parquet_file.rs b/compactor/src/parquet_file.rs
index fa9a428d85..c4b56e5596 100644
--- a/compactor/src/parquet_file.rs
+++ b/compactor/src/parquet_file.rs
@@ -8,15 +8,23 @@ use data_types::{
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CompactorParquetFile {
inner: ParquetFile,
+ // Estimated bytes a (Datafusion) query plan needs to scan and stream this parquet file
estimated_arrow_bytes: u64,
+ // Bytes estimated to store a parquet file in memory before running a query plan on it
+ estimated_file_size_in_memory_bytes: u64,
size_override: Option<i64>,
}
impl CompactorParquetFile {
- pub fn new(inner: ParquetFile, estimated_arrow_bytes: u64) -> Self {
+ pub fn new(
+ inner: ParquetFile,
+ estimated_arrow_bytes: u64,
+ estimated_file_size_in_memory_bytes: u64,
+ ) -> Self {
Self {
inner,
estimated_arrow_bytes,
+ estimated_file_size_in_memory_bytes,
size_override: None,
}
}
@@ -24,16 +32,21 @@ impl CompactorParquetFile {
pub(crate) fn new_with_size_override(
inner: ParquetFile,
estimated_arrow_bytes: u64,
+ estimated_file_size_in_memory_bytes: u64,
size: i64,
) -> Self {
- let mut this = Self::new(inner, estimated_arrow_bytes);
+ let mut this = Self::new(
+ inner,
+ estimated_arrow_bytes,
+ estimated_file_size_in_memory_bytes,
+ );
this.size_override = Some(size);
this
}
#[cfg(test)]
pub(crate) fn with_size_override(f: ParquetFile, size: i64) -> Self {
- let mut this = Self::new(f, 0);
+ let mut this = Self::new(f, 0, 0);
this.size_override = Some(size);
this
}
@@ -46,10 +59,19 @@ impl CompactorParquetFile {
self.size_override.unwrap_or(self.inner.file_size_bytes)
}
- pub fn estimated_arrow_bytes(&self) -> u64 {
+ // Bytes estimated to scan and stream the columns of this file
+ // These bytes will be used to compute:
+ // . the input bytes needed to scan this file when we compact it with other files
+ // . the bytes each output stream needs when we split output data into multiple streams
+ pub fn estimated_arrow_bytes_for_streaming_query_plan(&self) -> u64 {
self.estimated_arrow_bytes
}
+ // Bytes estimated to store the parquet file in memory and then scan & stream it
+ pub fn estimated_file_total_bytes_for_in_memory_storing_and_scanning(&self) -> u64 {
+ self.estimated_file_size_in_memory_bytes + self.estimated_arrow_bytes
+ }
+
pub fn compaction_level(&self) -> CompactionLevel {
self.inner.compaction_level
}
@@ -112,7 +134,7 @@ pub mod tests {
match size_override {
Some(size) => Self::with_size_override(parquet_file, size),
- None => Self::new(parquet_file, 0),
+ None => Self::new(parquet_file, 0, 0),
}
}
}
diff --git a/compactor/src/parquet_file_filtering.rs b/compactor/src/parquet_file_filtering.rs
index 44f296d5aa..61215d61ba 100644
--- a/compactor/src/parquet_file_filtering.rs
+++ b/compactor/src/parquet_file_filtering.rs
@@ -122,7 +122,8 @@ fn filter_parquet_files_inner(
let mut total_estimated_budget = 0;
for level_n_file in level_n {
// Estimate memory needed for this LN file
- let ln_estimated_file_bytes = level_n_file.estimated_arrow_bytes();
+ let ln_estimated_file_bytes =
+ level_n_file.estimated_file_total_bytes_for_in_memory_storing_and_scanning();
// Note: even though we can stop here if the ln_estimated_file_bytes is larger than the
// given budget,we still continue estimated the memory needed for its overlapped LN+1 to
@@ -136,7 +137,7 @@ fn filter_parquet_files_inner(
// Estimate memory needed for each LN+1
let current_ln_plus_1_estimated_file_bytes: Vec<_> = overlaps
.iter()
- .map(|file| file.estimated_arrow_bytes())
+ .map(|file| file.estimated_file_total_bytes_for_in_memory_storing_and_scanning())
.collect();
let estimated_file_bytes =
ln_estimated_file_bytes + current_ln_plus_1_estimated_file_bytes.iter().sum::<u64>();
@@ -1203,7 +1204,7 @@ mod tests {
column_set: ColumnSet::new(std::iter::empty()),
};
// Estimated arrow bytes for one file with a tag, a time and 11 rows = 1176
- CompactorParquetFile::new(f, 1176)
+ CompactorParquetFile::new(f, 1176, 0)
}
}
diff --git a/compactor/src/parquet_file_lookup.rs b/compactor/src/parquet_file_lookup.rs
index 7ec839e7ca..f08fa2354a 100644
--- a/compactor/src/parquet_file_lookup.rs
+++ b/compactor/src/parquet_file_lookup.rs
@@ -93,17 +93,20 @@ impl ParquetFilesForCompaction {
min_num_rows_allocated_per_record_batch_to_datafusion_plan,
parquet_file.row_count,
);
- // Add bytes need to read this file into memory
- // as well as the bytes needed to buffer the output
- let total_estimated_bytes =
- estimated_arrow_bytes + (2 * parquet_file.file_size_bytes as u64);
+ // Estimated bytes to store this file in memory
+ let estimated_bytes_to_store_in_memory = 2 * parquet_file.file_size_bytes as u64;
let parquet_file = match size_overrides.get(&parquet_file.id) {
Some(size) => CompactorParquetFile::new_with_size_override(
parquet_file,
- total_estimated_bytes,
+ estimated_arrow_bytes,
+ estimated_bytes_to_store_in_memory,
*size,
),
- None => CompactorParquetFile::new(parquet_file, total_estimated_bytes),
+ None => CompactorParquetFile::new(
+ parquet_file,
+ estimated_arrow_bytes,
+ estimated_bytes_to_store_in_memory,
+ ),
};
match parquet_file.compaction_level() {
CompactionLevel::Initial => level_0.push(parquet_file),
@@ -269,6 +272,7 @@ mod tests {
parquet_files_for_compaction.level_0,
vec![CompactorParquetFile::new(
parquet_file.parquet_file,
+ 0,
parquet_file_file_size_in_mem
)]
);
@@ -318,6 +322,7 @@ mod tests {
parquet_files_for_compaction.level_1,
vec![CompactorParquetFile::new(
parquet_file.parquet_file,
+ 0,
parquet_file_file_size_in_mem
)]
);
@@ -367,6 +372,7 @@ mod tests {
parquet_files_for_compaction.level_2,
vec![CompactorParquetFile::new(
parquet_file.parquet_file,
+ 0,
parquet_file_file_size_in_mem
)]
);
@@ -416,6 +422,7 @@ mod tests {
parquet_files_for_compaction.level_0,
vec![CompactorParquetFile::new(
l0.parquet_file,
+ 0,
l0_file_size_in_mem
)]
);
@@ -425,6 +432,7 @@ mod tests {
parquet_files_for_compaction.level_1,
vec![CompactorParquetFile::new(
l1.parquet_file,
+ 0,
l1_file_size_in_mem
)]
);
@@ -434,6 +442,7 @@ mod tests {
parquet_files_for_compaction.level_2,
vec![CompactorParquetFile::new(
l2.parquet_file,
+ 0,
l2_file_size_in_mem
)]
);
@@ -490,10 +499,12 @@ mod tests {
vec![
CompactorParquetFile::new(
l0_max_seq_50.parquet_file,
+ 0,
l0_max_seq_50_file_size_in_mem
),
CompactorParquetFile::new(
l0_max_seq_100.parquet_file,
+ 0,
l0_max_seq_100_file_size_in_mem
),
]
@@ -503,6 +514,7 @@ mod tests {
parquet_files_for_compaction.level_1,
vec![CompactorParquetFile::new(
l1.parquet_file,
+ 0,
l1_file_size_in_mem
)]
);
@@ -570,6 +582,7 @@ mod tests {
parquet_files_for_compaction.level_0,
vec![CompactorParquetFile::new(
l0.parquet_file,
+ 0,
l0_file_size_in_mem
)]
);
@@ -585,14 +598,17 @@ mod tests {
vec![
CompactorParquetFile::new(
l1_min_time_6666.parquet_file,
+ 0,
l1_min_time_6666_file_size_in_mem
),
CompactorParquetFile::new(
l1_min_time_7777.parquet_file,
+ 0,
l1_min_time_7777_file_size_in_mem
),
CompactorParquetFile::new(
l1_min_time_8888.parquet_file,
+ 0,
l1_min_time_8888_file_size_in_mem
),
]
@@ -603,6 +619,7 @@ mod tests {
parquet_files_for_compaction.level_2,
vec![CompactorParquetFile::new(
l2.parquet_file,
+ 0,
l2_file_size_in_mem
)]
);
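
A compact sketch of the two budget components the refactor separates: a streaming estimate for scanning the decoded columns, and an in-memory estimate for buffering the raw file, which the lookup code above approximates as twice the file's size on disk. Field and method names here are illustrative, not the crate's API:

```rust
/// Illustrative per-file estimate mirroring the split above: one component for
/// streaming the decoded columns through a plan, one for buffering the raw file.
struct FileEstimate {
    streaming_bytes: u64,      // scan/stream cost of the decoded columns
    in_memory_file_bytes: u64, // cost of holding the raw file in memory
}

impl FileEstimate {
    fn from_file(file_size_bytes: u64, streaming_bytes: u64) -> Self {
        Self {
            streaming_bytes,
            // The lookup code above approximates the in-memory cost as
            // twice the file's size on disk.
            in_memory_file_bytes: 2 * file_size_bytes,
        }
    }

    /// Budget charged when a file must be stored in memory *and* scanned,
    /// the sum the filtering code above now uses.
    fn store_and_scan_bytes(&self) -> u64 {
        self.in_memory_file_bytes + self.streaming_bytes
    }
}

fn main() {
    // e.g. a 1 kB parquet file whose decoded columns stream at ~1176 bytes
    let estimate = FileEstimate::from_file(1_000, 1_176);
    assert_eq!(estimate.store_and_scan_bytes(), 2_000 + 1_176);
    assert_eq!(estimate.streaming_bytes, 1_176);
}
```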
|
cf1dd5c8311668c03dbb177bb6e5f1e98fd2292e
|
Michael Gattozzi
|
2025-02-06 20:25:18
|
broken format for JSON queries and add tests (#25980)
|
In #25927 we missed that JSON queries were broken despite having some
tests use the format. This fixes JSON queries such that they now
properly contain a comma between RecordBatches. This commit also
includes tests for the formats that now stream data back (CSV, JSON, and
JSON Lines) so that we won't run into this issue again.
| null |
fix: broken format for JSON queries and add tests (#25980)
In #25927 we missed that JSON queries were broken despite having some
tests use the format. This fixes JSON queries such that they now
properly contain a comma between RecordBatches. This commit also
includes tests for the formats that now stream data back (CSV, JSON, and
JSON Lines) so that we won't run into this issue again.
|
diff --git a/influxdb3_server/src/http.rs b/influxdb3_server/src/http.rs
index 4c4dc83f46..e98ef6c99b 100644
--- a/influxdb3_server/src/http.rs
+++ b/influxdb3_server/src/http.rs
@@ -1579,11 +1579,9 @@ async fn record_batch_stream_to_body(
fn start_row<W: std::io::Write>(
&self,
writer: &mut W,
- is_first_row: bool,
+ _is_first_row: bool,
) -> std::result::Result<(), arrow_schema::ArrowError> {
- if !is_first_row {
- writer.write_all(b",")?;
- }
+ writer.write_all(b",")?;
Ok(())
}
@@ -1840,10 +1838,16 @@ fn legacy_write_error_to_response(e: WriteParseError) -> Response<Body> {
mod tests {
use http::{header::ACCEPT, HeaderMap, HeaderValue};
- use super::QueryFormat;
-
+ use super::record_batch_stream_to_body;
use super::validate_db_name;
+ use super::QueryFormat;
use super::ValidateDbNameError;
+ use arrow_array::record_batch;
+ use datafusion::execution::SendableRecordBatchStream;
+ use datafusion::physical_plan::stream::RecordBatchStreamAdapter;
+ use hyper::body::to_bytes;
+ use pretty_assertions::assert_eq;
+ use std::str;
macro_rules! assert_validate_db_name {
($name:literal, $accept_rp:literal, $expected:pat) => {
@@ -1884,4 +1888,201 @@ mod tests {
assert_validate_db_name!("_foo", false, Err(ValidateDbNameError::InvalidStartChar));
assert_validate_db_name!("", false, Err(ValidateDbNameError::Empty));
}
+
+ #[tokio::test]
+ async fn test_json_output_empty() {
+ // Turn RecordBatches into a Body and then collect into Bytes to assert
+ // their validity
+ let bytes = to_bytes(
+ record_batch_stream_to_body(make_record_stream(None), QueryFormat::Json)
+ .await
+ .unwrap(),
+ )
+ .await
+ .unwrap();
+ assert_eq!(str::from_utf8(bytes.as_ref()).unwrap(), "[]");
+ }
+
+ #[tokio::test]
+ async fn test_json_output_one_record() {
+ // Turn RecordBatches into a Body and then collect into Bytes to assert
+ // their validity
+ let bytes = to_bytes(
+ record_batch_stream_to_body(make_record_stream(Some(1)), QueryFormat::Json)
+ .await
+ .unwrap(),
+ )
+ .await
+ .unwrap();
+ assert_eq!(str::from_utf8(bytes.as_ref()).unwrap(), "[{\"a\":1}]");
+ }
+ #[tokio::test]
+ async fn test_json_output_three_records() {
+ // Turn RecordBatches into a Body and then collect into Bytes to assert
+ // their validity
+ let bytes = to_bytes(
+ record_batch_stream_to_body(make_record_stream(Some(3)), QueryFormat::Json)
+ .await
+ .unwrap(),
+ )
+ .await
+ .unwrap();
+ assert_eq!(
+ str::from_utf8(bytes.as_ref()).unwrap(),
+ "[{\"a\":1},{\"a\":1},{\"a\":1}]"
+ );
+ }
+ #[tokio::test]
+ async fn test_json_output_five_records() {
+ // Turn RecordBatches into a Body and then collect into Bytes to assert
+ // their validity
+ let bytes = to_bytes(
+ record_batch_stream_to_body(make_record_stream(Some(5)), QueryFormat::Json)
+ .await
+ .unwrap(),
+ )
+ .await
+ .unwrap();
+ assert_eq!(
+ str::from_utf8(bytes.as_ref()).unwrap(),
+ "[{\"a\":1},{\"a\":1},{\"a\":1},{\"a\":1},{\"a\":1}]"
+ );
+ }
+
+ #[tokio::test]
+ async fn test_jsonl_output_empty() {
+ // Turn RecordBatches into a Body and then collect into Bytes to assert
+ // their validity
+ let bytes = to_bytes(
+ record_batch_stream_to_body(make_record_stream(None), QueryFormat::JsonLines)
+ .await
+ .unwrap(),
+ )
+ .await
+ .unwrap();
+ assert_eq!(str::from_utf8(bytes.as_ref()).unwrap(), "");
+ }
+
+ #[tokio::test]
+ async fn test_jsonl_output_one_record() {
+ // Turn RecordBatches into a Body and then collect into Bytes to assert
+ // their validity
+ let bytes = to_bytes(
+ record_batch_stream_to_body(make_record_stream(Some(1)), QueryFormat::JsonLines)
+ .await
+ .unwrap(),
+ )
+ .await
+ .unwrap();
+ assert_eq!(str::from_utf8(bytes.as_ref()).unwrap(), "{\"a\":1}\n");
+ }
+ #[tokio::test]
+ async fn test_jsonl_output_three_records() {
+ // Turn RecordBatches into a Body and then collect into Bytes to assert
+ // their validity
+ let bytes = to_bytes(
+ record_batch_stream_to_body(make_record_stream(Some(3)), QueryFormat::JsonLines)
+ .await
+ .unwrap(),
+ )
+ .await
+ .unwrap();
+ assert_eq!(
+ str::from_utf8(bytes.as_ref()).unwrap(),
+ "{\"a\":1}\n{\"a\":1}\n{\"a\":1}\n"
+ );
+ }
+ #[tokio::test]
+ async fn test_jsonl_output_five_records() {
+ // Turn RecordBatches into a Body and then collect into Bytes to assert
+ // their validity
+ let bytes = to_bytes(
+ record_batch_stream_to_body(make_record_stream(Some(5)), QueryFormat::JsonLines)
+ .await
+ .unwrap(),
+ )
+ .await
+ .unwrap();
+ assert_eq!(
+ str::from_utf8(bytes.as_ref()).unwrap(),
+ "{\"a\":1}\n{\"a\":1}\n{\"a\":1}\n{\"a\":1}\n{\"a\":1}\n"
+ );
+ }
+ #[tokio::test]
+ async fn test_csv_output_empty() {
+ // Turn RecordBatches into a Body and then collect into Bytes to assert
+ // their validity
+ let bytes = to_bytes(
+ record_batch_stream_to_body(make_record_stream(None), QueryFormat::Csv)
+ .await
+ .unwrap(),
+ )
+ .await
+ .unwrap();
+ assert_eq!(str::from_utf8(bytes.as_ref()).unwrap(), "");
+ }
+
+ #[tokio::test]
+ async fn test_csv_output_one_record() {
+ // Turn RecordBatches into a Body and then collect into Bytes to assert
+ // their validity
+ let bytes = to_bytes(
+ record_batch_stream_to_body(make_record_stream(Some(1)), QueryFormat::Csv)
+ .await
+ .unwrap(),
+ )
+ .await
+ .unwrap();
+ assert_eq!(str::from_utf8(bytes.as_ref()).unwrap(), "a\n1\n");
+ }
+ #[tokio::test]
+ async fn test_csv_output_three_records() {
+ // Turn RecordBatches into a Body and then collect into Bytes to assert
+ // their validity
+ let bytes = to_bytes(
+ record_batch_stream_to_body(make_record_stream(Some(3)), QueryFormat::Csv)
+ .await
+ .unwrap(),
+ )
+ .await
+ .unwrap();
+ assert_eq!(str::from_utf8(bytes.as_ref()).unwrap(), "a\n1\n1\n1\n");
+ }
+ #[tokio::test]
+ async fn test_csv_output_five_records() {
+ // Turn RecordBatches into a Body and then collect into Bytes to assert
+ // their validity
+ let bytes = to_bytes(
+ record_batch_stream_to_body(make_record_stream(Some(5)), QueryFormat::Csv)
+ .await
+ .unwrap(),
+ )
+ .await
+ .unwrap();
+ assert_eq!(
+ str::from_utf8(bytes.as_ref()).unwrap(),
+ "a\n1\n1\n1\n1\n1\n"
+ );
+ }
+ fn make_record_stream(records: Option<usize>) -> SendableRecordBatchStream {
+ let mut batches = Vec::new();
+ let batch = record_batch!(("a", Int32, [1])).unwrap();
+ let schema = batch.schema();
+ let num = match records {
+ None => {
+ let stream = futures::stream::iter(Vec::new());
+ let adapter = RecordBatchStreamAdapter::new(schema, stream);
+ return Box::pin(adapter);
+ }
+ Some(num) => num,
+ };
+ batches.push(Ok(batch));
+ for _ in 1..num {
+ batches.push(Ok(record_batch!(("a", Int32, [1])).unwrap()));
+ }
+ let stream = futures::stream::iter(batches);
+ // Convert the stream to a SendableRecordBatchStream
+ let adapter = RecordBatchStreamAdapter::new(schema, stream);
+ Box::pin(adapter)
+ }
}
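
The underlying bug was that the element separator was handled per RecordBatch, so the comma between the last row of one batch and the first row of the next never got written. A generic sketch (independent of the arrow JSON writer the handler wraps) of streaming a JSON array where the "first element" state lives at the stream level rather than the batch level:

```rust
/// Streams `batches` of already-serialized JSON objects as one JSON array,
/// tracking "have we written an element yet" across batch boundaries instead
/// of resetting it for every batch (which is what dropped the commas).
fn stream_json_array(batches: &[Vec<&str>]) -> String {
    let mut out = String::from("[");
    let mut wrote_any = false;
    for batch in batches {
        for row in batch {
            if wrote_any {
                out.push(',');
            }
            out.push_str(row);
            wrote_any = true;
        }
    }
    out.push(']');
    out
}

fn main() {
    let batches = vec![vec![r#"{"a":1}"#], vec![r#"{"a":1}"#, r#"{"a":1}"#]];
    // Three rows spread over two batches still yield valid JSON.
    assert_eq!(stream_json_array(&batches), r#"[{"a":1},{"a":1},{"a":1}]"#);
}
```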
|
48a5c3e96636c3f9d4e025adcaf62e19ec2b9b1c
|
Andrew Lamb
|
2023-07-13 06:14:59
|
Add longer sleep in `end_to_end_cases::debug::build_catalog` and extra logging (#8224)
|
* fix: Add longer sleep in end_to_end_cases::debug::build_catalog
* chore: add debug logging when test fails
---------
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
chore: Add longer sleep in `end_to_end_cases::debug::build_catalog` and extra logging (#8224)
* fix: Add longer sleep in end_to_end_cases::debug::build_catalog
* chore: add debug logging when test fails
---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/influxdb_iox/tests/end_to_end_cases/debug.rs b/influxdb_iox/tests/end_to_end_cases/debug.rs
index da50bc0c87..937c91c430 100644
--- a/influxdb_iox/tests/end_to_end_cases/debug.rs
+++ b/influxdb_iox/tests/end_to_end_cases/debug.rs
@@ -1,5 +1,10 @@
//! Tests the `influxdb_iox debug` commands
-use std::{path::Path, time::Duration};
+use std::{
+ collections::VecDeque,
+ io::Write,
+ path::{Path, PathBuf},
+ time::Duration,
+};
use arrow::record_batch::RecordBatch;
use arrow_util::assert_batches_sorted_eq;
@@ -105,20 +110,18 @@ async fn build_catalog() {
// We can build a catalog and start up the server and run a query
let restarted = RestartedServer::build_catalog_and_start(&table_dir).await;
- let batches = run_sql_until_non_empty(&restarted, sql, namespace.as_str())
- .with_timeout(Duration::from_secs(2))
- .await
- .expect("timed out waiting for non-empty batches in result");
+ let batches = restarted
+ .run_sql_until_non_empty(sql, namespace.as_str())
+ .await;
assert_batches_sorted_eq!(&expected, &batches);
// We can also rebuild a catalog from just the parquet files
let only_parquet_dir = copy_only_parquet_files(&table_dir);
let restarted =
RestartedServer::build_catalog_and_start(only_parquet_dir.path()).await;
- let batches = run_sql_until_non_empty(&restarted, sql, namespace.as_str())
- .with_timeout(Duration::from_secs(2))
- .await
- .expect("timed out waiting for non-empty batches in result");
+ let batches = restarted
+ .run_sql_until_non_empty(sql, namespace.as_str())
+ .await;
assert_batches_sorted_eq!(&expected, &batches);
}
.boxed()
@@ -129,23 +132,6 @@ async fn build_catalog() {
.await
}
-/// Loops forever, running the SQL query against the [`RestartedServer`] given
-/// until the result is non-empty. Callers are responsible for timing out the
-/// function.
-async fn run_sql_until_non_empty(
- restarted: &RestartedServer,
- sql: &str,
- namespace: &str,
-) -> Vec<RecordBatch> {
- loop {
- let batches = restarted.run_sql(sql, namespace).await;
- if !batches.is_empty() {
- return batches;
- }
- tokio::time::sleep(Duration::from_millis(100)).await;
- }
-}
-
/// An all in one instance, with data directory of `data_dir`
struct RestartedServer {
all_in_one: ServerFixture,
@@ -183,27 +169,40 @@ impl RestartedServer {
println!("target_directory: {data_dir:?}");
// call `influxdb_iox debug build-catalog <table_dir> <new_data_dir>`
- Command::cargo_bin("influxdb_iox")
+ let cmd = Command::cargo_bin("influxdb_iox")
.unwrap()
// use -v to enable logging so we can check the status messages
- .arg("-v")
+ .arg("-vv")
.arg("debug")
.arg("build-catalog")
.arg(exported_table_dir.as_os_str().to_str().unwrap())
.arg(data_dir.path().as_os_str().to_str().unwrap())
.assert()
- .success()
- .stdout(
- predicate::str::contains("Beginning catalog / object_store build")
- .and(predicate::str::contains(
- "Begin importing files total_files=1",
- ))
- .and(predicate::str::contains(
- "Completed importing files total_files=1",
- )),
- );
+ .success();
+
+ // debug information to track down https://github.com/influxdata/influxdb_iox/issues/8203
+ println!("***** Begin build-catalog STDOUT ****");
+ std::io::stdout()
+ .write_all(&cmd.get_output().stdout)
+ .unwrap();
+ println!("***** Begin build-catalog STDERR ****");
+ std::io::stdout()
+ .write_all(&cmd.get_output().stderr)
+ .unwrap();
+ println!("***** DONE ****");
+
+ cmd.stdout(
+ predicate::str::contains("Beginning catalog / object_store build")
+ .and(predicate::str::contains(
+ "Begin importing files total_files=1",
+ ))
+ .and(predicate::str::contains(
+ "Completed importing files total_files=1",
+ )),
+ );
println!("Completed rebuild in {data_dir:?}");
+ RecursiveDirPrinter::new().print(data_dir.path());
// now, start up a new server in all-in-one mode
// using the newly built data directory
@@ -215,6 +214,27 @@ impl RestartedServer {
data_dir,
}
}
+
+ /// Runs the SQL query against this server, in a loop until
+ /// results are returned. Panics if the results are not produced
+ /// within 5 seconds
+ async fn run_sql_until_non_empty(&self, sql: &str, namespace: &str) -> Vec<RecordBatch> {
+ let timeout = Duration::from_secs(5);
+ let loop_sleep = Duration::from_millis(500);
+ let fut = async {
+ loop {
+ let batches = self.run_sql(sql, namespace).await;
+ if !batches.is_empty() {
+ return batches;
+ }
+ tokio::time::sleep(loop_sleep).await;
+ }
+ };
+
+ fut.with_timeout(timeout)
+ .await
+ .expect("timed out waiting for non-empty batches in result")
+ }
}
/// Copies only parquet files from the source directory to a new
@@ -240,3 +260,43 @@ fn copy_only_parquet_files(src: &Path) -> TempDir {
}
target_dir
}
+
+/// Prints out the contents of the directory recursively
+/// for debugging.
+///
+/// ```text
+/// RecursiveDirPrinter All files rooted at "/tmp/.tmpvf16r0"
+/// "/tmp/.tmpvf16r0"
+/// "/tmp/.tmpvf16r0/catalog.sqlite"
+/// "/tmp/.tmpvf16r0/object_store"
+/// "/tmp/.tmpvf16r0/object_store/1"
+/// "/tmp/.tmpvf16r0/object_store/1/1"
+/// "/tmp/.tmpvf16r0/object_store/1/1/b862a7e9b329ee6a418cde191198eaeb1512753f19b87a81def2ae6c3d0ed237"
+/// "/tmp/.tmpvf16r0/object_store/1/1/b862a7e9b329ee6a418cde191198eaeb1512753f19b87a81def2ae6c3d0ed237/d78abef6-6859-48eb-aa62-3518097fbb9b.parquet"
+///
+struct RecursiveDirPrinter {
+ paths: VecDeque<PathBuf>,
+}
+
+impl RecursiveDirPrinter {
+ fn new() -> Self {
+ Self {
+ paths: VecDeque::new(),
+ }
+ }
+
+ // print root and all directories
+ fn print(mut self, root: &Path) {
+ println!("RecursiveDirPrinter All files rooted at {root:?}");
+ self.paths.push_back(PathBuf::from(root));
+
+ while let Some(path) = self.paths.pop_front() {
+ println!("{path:?}");
+ if path.is_dir() {
+ for entry in std::fs::read_dir(path).unwrap() {
+ self.paths.push_front(entry.unwrap().path());
+ }
+ }
+ }
+ }
+}
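
The reworked helper above is an instance of a common test pattern: poll an async operation until it yields data, bounded by an overall deadline. A generic sketch of that shape, assuming tokio's `timeout` and `sleep` in place of the repo's `with_timeout` combinator; `poll_until_non_empty` and its arguments are illustrative names:

```rust
use std::time::Duration;

/// Polls `fetch` every `poll_every` until it returns a non-empty result,
/// giving up (returning `None`) once `deadline` has elapsed.
async fn poll_until_non_empty<T, F, Fut>(
    deadline: Duration,
    poll_every: Duration,
    mut fetch: F,
) -> Option<Vec<T>>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Vec<T>>,
{
    tokio::time::timeout(deadline, async {
        loop {
            let rows = fetch().await;
            if !rows.is_empty() {
                return rows;
            }
            tokio::time::sleep(poll_every).await;
        }
    })
    .await
    .ok()
}

#[tokio::main]
async fn main() {
    let mut calls = 0;
    // The fake query returns nothing for the first two polls, then some rows.
    let rows = poll_until_non_empty(Duration::from_secs(5), Duration::from_millis(10), || {
        calls += 1;
        let ready = calls >= 3;
        async move {
            if ready {
                vec![1, 2, 3]
            } else {
                Vec::new()
            }
        }
    })
    .await;
    assert_eq!(rows, Some(vec![1, 2, 3]));
}
```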
|
9d8b620cd209959b2399380ff15de216b16d434a
|
Marco Neumann
|
2023-06-27 16:44:06
|
gather column ranges after decoding (#8090)
|
We need to decode the ingester data in a serial fashion (since it is a
data stream). Cache access during that phase is costly since we cannot
parallelize that. To avoid that, we gather the column ranges AFTER
decoding and calculate the chunk statistics accordingly.
This refactoring also removes the partition sort key from ingester
partitions since they are not required anymore. They are a leftover of
the old physical query planning. They were not marked as "unused" since
they were used by some test code.
Required for #8089.
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
refactor: gather column ranges after decoding (#8090)
We need to decode the ingester data in a serial fashion (since it is a
data stream). Cache access during that phase is costly since we cannot
parallelize that. To avoid that, we gather the column ranges AFTER
decoding and calculate the chunk statistics accordingly.
This refactoring also removes the partition sort key from ingester
partitions since they are not required anymore. They are a leftover of
the old physical query planning. They were not marked as "unused" since
they were used by some test code.
Required for #8089.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/querier/src/ingester/mod.rs b/querier/src/ingester/mod.rs
index 7335ff746a..45963c4ddc 100644
--- a/querier/src/ingester/mod.rs
+++ b/querier/src/ingester/mod.rs
@@ -9,18 +9,16 @@ use self::{
use crate::{
cache::{namespace::CachedTable, CatalogCache},
df_stats::{create_chunk_statistics, ColumnRanges},
+ CONCURRENT_CHUNK_CREATION_JOBS,
};
use arrow::{datatypes::DataType, error::ArrowError, record_batch::RecordBatch};
use arrow_flight::decode::DecodedPayload;
use async_trait::async_trait;
use backoff::{Backoff, BackoffConfig, BackoffError};
use client_util::connection;
-use data_types::{
- ChunkId, ChunkOrder, DeletePredicate, NamespaceId, PartitionHashId, PartitionId,
- TimestampMinMax,
-};
+use data_types::{ChunkId, ChunkOrder, DeletePredicate, NamespaceId, PartitionHashId, PartitionId};
use datafusion::{error::DataFusionError, physical_plan::Statistics};
-use futures::{stream::FuturesUnordered, TryStreamExt};
+use futures::{stream::FuturesUnordered, StreamExt, TryStreamExt};
use ingester_query_grpc::{
encode_proto_predicate_as_base64, influxdata::iox::ingester::v1::IngesterQueryResponseMetadata,
IngesterQueryRequest,
@@ -34,6 +32,7 @@ use iox_time::{Time, TimeProvider};
use metric::{DurationHistogram, Metric};
use observability_deps::tracing::{debug, trace, warn};
use predicate::Predicate;
+use rand::{rngs::StdRng, seq::SliceRandom, SeedableRng};
use schema::{sort::SortKey, Projection, Schema};
use snafu::{ensure, OptionExt, ResultExt, Snafu};
use std::{
@@ -496,10 +495,10 @@ async fn execute(
span_recorder.child_span("IngesterStreamDecoder"),
);
for (msg, md) in messages {
- decoder.register(msg, md).await?;
+ decoder.register(msg, md)?;
}
- decoder.finalize()
+ decoder.finalize().await
}
/// Helper to disassemble the data from the ingester Apache Flight arrow stream.
@@ -564,7 +563,7 @@ impl IngesterStreamDecoder {
}
/// Register a new message and its metadata from the Flight stream.
- async fn register(
+ fn register(
&mut self,
msg: DecodedPayload,
md: IngesterQueryResponseMetadata,
@@ -589,25 +588,6 @@ impl IngesterStreamDecoder {
},
);
- // Use a temporary empty partition sort key. We are going to fetch this AFTER we
- // know all chunks because then we are able to detect all relevant primary key
- // columns that the sort key must cover.
- let partition_sort_key = None;
-
- // If the partition does NOT yet exist within the catalog, this is OK. We can deal without the ranges,
- // the chunk pruning will not be as efficient though.
- let partition_column_ranges = self
- .catalog_cache
- .partition()
- .column_ranges(
- Arc::clone(&self.cached_table),
- partition_id,
- self.span_recorder
- .child_span("cache GET partition column ranges"),
- )
- .await
- .unwrap_or_default();
-
let ingester_uuid =
Uuid::parse_str(&md.ingester_uuid).context(IngesterUuidSnafu {
ingester_uuid: md.ingester_uuid,
@@ -618,8 +598,6 @@ impl IngesterStreamDecoder {
partition_id,
partition_hash_id,
md.completed_persistence_count,
- partition_sort_key,
- partition_column_ranges,
);
self.current_partition = Some(partition);
}
@@ -659,20 +637,47 @@ impl IngesterStreamDecoder {
}
/// Flush internal state and return sorted set of partitions.
- fn finalize(mut self) -> Result<Vec<IngesterPartition>> {
+ async fn finalize(mut self) -> Result<Vec<IngesterPartition>> {
self.flush_partition()?;
- let mut ids: Vec<_> = self.finished_partitions.keys().copied().collect();
- ids.sort();
+ let mut partitions = self.finished_partitions.into_values().collect::<Vec<_>>();
- let partitions = ids
- .into_iter()
- .map(|id| {
- self.finished_partitions
- .remove(&id)
- .expect("just got key from this map")
+ // shuffle order to even catalog load, because cache hits/misses might be correlated w/ the order of the
+ // partitions.
+ //
+ // Note that we sort before shuffling to achieve a deterministic pseudo-random order
+ let mut rng = StdRng::seed_from_u64(self.cached_table.id.get() as u64);
+ partitions.sort_by_key(|p| p.partition_id);
+ partitions.shuffle(&mut rng);
+
+ let mut partitions = futures::stream::iter(partitions)
+ .map(|p| {
+ let cached_table = &self.cached_table;
+ let catalog_cache = &self.catalog_cache;
+ let span = self
+ .span_recorder
+ .child_span("fetch column ranges for partition");
+ async move {
+ // If the partition does NOT yet exist within the catalog, this is OK. We can deal without the ranges,
+ // the chunk pruning will not be as efficient though.
+ let ranges = catalog_cache
+ .partition()
+ .column_ranges(Arc::clone(cached_table), p.partition_id, span)
+ .await
+ .unwrap_or_default();
+ (p, ranges)
+ }
})
- .collect();
+ .buffer_unordered(CONCURRENT_CHUNK_CREATION_JOBS)
+ .map(|(mut p, ranges)| {
+ p.set_partition_column_ranges(ranges);
+ p
+ })
+ .collect::<Vec<_>>()
+ .await;
+
+ // deterministic order
+ partitions.sort_by_key(|p| p.partition_id);
self.span_recorder.ok("finished");
Ok(partitions)
}
@@ -822,12 +827,6 @@ pub struct IngesterPartition {
/// The number of Parquet files this ingester UUID has persisted for this partition.
completed_persistence_count: u64,
- /// Partition-wide sort key.
- partition_sort_key: Option<Arc<SortKey>>,
-
- /// Partition-wide column ranges.
- partition_column_ranges: ColumnRanges,
-
chunks: Vec<IngesterChunk>,
}
@@ -839,16 +838,12 @@ impl IngesterPartition {
partition_id: PartitionId,
partition_hash_id: Option<PartitionHashId>,
completed_persistence_count: u64,
- partition_sort_key: Option<Arc<SortKey>>,
- partition_column_ranges: ColumnRanges,
) -> Self {
Self {
ingester_uuid,
partition_id,
partition_hash_id,
completed_persistence_count,
- partition_sort_key,
- partition_column_ranges,
chunks: vec![],
}
}
@@ -876,25 +871,12 @@ impl IngesterPartition {
.map(|batch| ensure_schema(batch, &expected_schema))
.collect::<Result<Vec<RecordBatch>>>()?;
- // TODO: may want to ask the Ingester to send this value instead of computing it here.
- let ts_min_max = compute_timenanosecond_min_max(&batches).expect("Should have time range");
-
- let row_count = batches.iter().map(|batch| batch.num_rows()).sum::<usize>() as u64;
- let stats = Arc::new(create_chunk_statistics(
- row_count,
- &expected_schema,
- ts_min_max,
- &self.partition_column_ranges,
- ));
-
let chunk = IngesterChunk {
chunk_id,
partition_id: self.partition_id,
schema: expected_schema,
- partition_sort_key: self.partition_sort_key.clone(),
batches,
- ts_min_max,
- stats,
+ stats: None,
delete_predicates: vec![],
};
@@ -903,6 +885,27 @@ impl IngesterPartition {
Ok(self)
}
+ pub(crate) fn set_partition_column_ranges(&mut self, partition_column_ranges: ColumnRanges) {
+ for chunk in &mut self.chunks {
+ // TODO: may want to ask the Ingester to send this value instead of computing it here.
+ let ts_min_max =
+ compute_timenanosecond_min_max(&chunk.batches).expect("Should have time range");
+
+ let row_count = chunk
+ .batches
+ .iter()
+ .map(|batch| batch.num_rows())
+ .sum::<usize>() as u64;
+ let stats = Arc::new(create_chunk_statistics(
+ row_count,
+ &chunk.schema,
+ ts_min_max,
+ &partition_column_ranges,
+ ));
+ chunk.stats = Some(stats);
+ }
+ }
+
pub(crate) fn ingester_uuid(&self) -> Uuid {
self.ingester_uuid
}
@@ -943,17 +946,13 @@ pub struct IngesterChunk {
partition_id: PartitionId,
schema: Schema,
- /// Partition-wide sort key.
- partition_sort_key: Option<Arc<SortKey>>,
-
/// The raw table data
batches: Vec<RecordBatch>,
- /// Timestamp-specific stats
- ts_min_max: TimestampMinMax,
-
/// Summary Statistics
- stats: Arc<Statistics>,
+ ///
+ /// Set to `None` if not calculated yet.
+ stats: Option<Arc<Statistics>>,
delete_predicates: Vec<Arc<DeletePredicate>>,
}
@@ -982,7 +981,7 @@ impl IngesterChunk {
impl QueryChunkMeta for IngesterChunk {
fn stats(&self) -> Arc<Statistics> {
- Arc::clone(&self.stats)
+ Arc::clone(self.stats.as_ref().expect("chunk stats set"))
}
fn schema(&self) -> &Schema {
@@ -1854,8 +1853,6 @@ mod tests {
&PartitionKey::from("arbitrary"),
)),
0,
- None,
- Default::default(),
)
.try_add_chunk(ChunkId::new(), expected_schema.clone(), vec![case])
.unwrap();
@@ -1886,8 +1883,6 @@ mod tests {
&PartitionKey::from("arbitrary"),
)),
0,
- None,
- Default::default(),
)
.try_add_chunk(ChunkId::new(), expected_schema, vec![batch])
.unwrap_err();
diff --git a/querier/src/ingester/test_util.rs b/querier/src/ingester/test_util.rs
index 6ac5c49850..4770a2839e 100644
--- a/querier/src/ingester/test_util.rs
+++ b/querier/src/ingester/test_util.rs
@@ -1,5 +1,5 @@
use super::IngesterConnection;
-use crate::{cache::namespace::CachedTable, df_stats::create_chunk_statistics};
+use crate::cache::namespace::CachedTable;
use async_trait::async_trait;
use data_types::NamespaceId;
use parking_lot::Mutex;
@@ -67,44 +67,25 @@ impl IngesterConnection for MockIngesterConnection {
let partitions = partitions
.into_iter()
.map(|mut p| async move {
- let column_ranges = Arc::clone(&p.partition_column_ranges);
let chunks = p
.chunks
.into_iter()
- .map(|ic| {
- let column_ranges = Arc::clone(&column_ranges);
- async move {
- let batches: Vec<_> = ic
- .batches
- .iter()
- .map(|batch| match ic.schema.df_projection(selection).unwrap() {
- Some(projection) => batch.project(&projection).unwrap(),
- None => batch.clone(),
- })
- .collect();
+ .map(|ic| async move {
+ let batches: Vec<_> = ic
+ .batches
+ .iter()
+ .map(|batch| match ic.schema.df_projection(selection).unwrap() {
+ Some(projection) => batch.project(&projection).unwrap(),
+ None => batch.clone(),
+ })
+ .collect();
- assert!(!batches.is_empty(), "Error: empty batches");
- let new_schema = IOxSchema::try_from(batches[0].schema()).unwrap();
- let total_row_count =
- batches.iter().map(|b| b.num_rows()).sum::<usize>() as u64;
-
- let stats = create_chunk_statistics(
- total_row_count,
- &new_schema,
- ic.ts_min_max,
- &column_ranges,
- );
-
- super::IngesterChunk {
- chunk_id: ic.chunk_id,
- partition_id: ic.partition_id,
- schema: new_schema,
- partition_sort_key: ic.partition_sort_key,
- batches,
- ts_min_max: ic.ts_min_max,
- stats: Arc::new(stats),
- delete_predicates: vec![],
- }
+ assert!(!batches.is_empty(), "Error: empty batches");
+ let schema = IOxSchema::try_from(batches[0].schema()).unwrap();
+ super::IngesterChunk {
+ batches,
+ schema,
+ ..ic
}
})
.collect::<Vec<_>>();
diff --git a/querier/src/lib.rs b/querier/src/lib.rs
index 24cfd84f01..9985f1d0e5 100644
--- a/querier/src/lib.rs
+++ b/querier/src/lib.rs
@@ -27,6 +27,11 @@ mod server;
mod system_tables;
mod table;
+/// Number of concurrent chunk creation jobs.
+///
+/// This is mostly to fetch per-partition data concurrently.
+const CONCURRENT_CHUNK_CREATION_JOBS: usize = 100;
+
pub use cache::CatalogCache as QuerierCatalogCache;
pub use database::{Error as QuerierDatabaseError, QuerierDatabase};
pub use ingester::{
diff --git a/querier/src/parquet/creation.rs b/querier/src/parquet/creation.rs
index 1c8891d4ef..7ccf6cc33f 100644
--- a/querier/src/parquet/creation.rs
+++ b/querier/src/parquet/creation.rs
@@ -20,15 +20,11 @@ use crate::{
df_stats::{create_chunk_statistics, ColumnRanges},
parquet::QuerierParquetChunkMeta,
table::MetricPruningObserver,
+ CONCURRENT_CHUNK_CREATION_JOBS,
};
use super::QuerierParquetChunk;
-/// Number of concurrent chunk creation jobs.
-///
-/// This is mostly to fetch per-partition data concurrently.
-const CONCURRENT_CHUNK_CREATION_JOBS: usize = 100;
-
/// Adapter that can create chunks.
#[derive(Debug)]
pub struct ChunkAdapter {
diff --git a/querier/src/table/test_util.rs b/querier/src/table/test_util.rs
index 0cd99c0d35..d0ccbe86c5 100644
--- a/querier/src/table/test_util.rs
+++ b/querier/src/table/test_util.rs
@@ -8,7 +8,7 @@ use data_types::ChunkId;
use iox_catalog::interface::{get_schema_by_name, SoftDeletedRows};
use iox_tests::{TestCatalog, TestPartition, TestTable};
use mutable_batch_lp::test_helpers::lp_to_mutable_batch;
-use schema::{sort::SortKey, Projection, Schema};
+use schema::{Projection, Schema};
use std::{sync::Arc, time::Duration};
use tokio::runtime::Handle;
use uuid::Uuid;
@@ -67,7 +67,6 @@ pub(crate) struct IngesterPartitionBuilder {
partition: Arc<TestPartition>,
ingester_chunk_id: u128,
- partition_sort_key: Option<Arc<SortKey>>,
partition_column_ranges: ColumnRanges,
/// Data returned from the partition, in line protocol format
@@ -79,7 +78,6 @@ impl IngesterPartitionBuilder {
Self {
schema,
partition: Arc::clone(partition),
- partition_sort_key: None,
partition_column_ranges: Default::default(),
ingester_chunk_id: 1,
lp: Vec::new(),
@@ -103,19 +101,21 @@ impl IngesterPartitionBuilder {
pub(crate) fn build(&self) -> IngesterPartition {
let data = self.lp.iter().map(|lp| lp_to_record_batch(lp)).collect();
- IngesterPartition::new(
+ let mut part = IngesterPartition::new(
Uuid::new_v4(),
self.partition.partition.id,
self.partition.partition.hash_id().cloned(),
0,
- self.partition_sort_key.clone(),
- Arc::clone(&self.partition_column_ranges),
)
.try_add_chunk(
ChunkId::new_test(self.ingester_chunk_id),
self.schema.clone(),
data,
)
- .unwrap()
+ .unwrap();
+
+ part.set_partition_column_ranges(Arc::clone(&self.partition_column_ranges));
+
+ part
}
}
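
The diff above fetches per-partition column ranges with a bounded-concurrency futures pipeline: sort for determinism, shuffle with a seeded RNG so catalog load is spread evenly, fan out with `buffer_unordered`, then re-sort. A minimal standalone sketch of that pattern (not the repository code; `fetch_ranges` and the seed are made up, and it assumes the `futures`, `rand`, and `tokio` crates):

```rust
use futures::StreamExt;
use rand::{rngs::StdRng, seq::SliceRandom, SeedableRng};

// Stand-in for the per-partition catalog lookup.
async fn fetch_ranges(id: u64) -> (u64, String) {
    (id, format!("ranges-for-{id}"))
}

#[tokio::main]
async fn main() {
    let mut partition_ids: Vec<u64> = vec![3, 1, 5, 2, 4];

    // Deterministic pseudo-random order: sort first, then shuffle with a fixed seed.
    partition_ids.sort_unstable();
    let mut rng = StdRng::seed_from_u64(42);
    partition_ids.shuffle(&mut rng);

    // Bounded fan-out, analogous to CONCURRENT_CHUNK_CREATION_JOBS in the diff.
    let mut results = futures::stream::iter(partition_ids)
        .map(fetch_ranges)
        .buffer_unordered(4)
        .collect::<Vec<_>>()
        .await;

    // Restore a deterministic order before handing results back.
    results.sort_by_key(|(id, _)| *id);
    println!("{results:?}");
}
```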
|
b8a94488b5693503731ec01c3e5b5a47255df516
|
Trevor Hilton
|
2025-01-16 11:59:01
|
support v1 query API GROUP BY semantics (#25845)
|
This updates the v1 /query API handler to handle InfluxDB v1's unique
query response structure when GROUP BY clauses are provided.
The distinction is in the addition of a "tags" field to the emitted series
data that contains a map of the GROUP BY tags along with their distinct
values associated with the data in the "values" field.
This required splitting the QueryExecutor into two query paths for InfluxQL
and SQL, as this allowed for handling InfluxQL query parsing in advance
of query planning.
A set of snapshot tests were added to check that it all works.
| null |
feat: support v1 query API GROUP BY semantics (#25845)
This updates the v1 /query API handler to handle InfluxDB v1's unique
query response structure when GROUP BY clauses are provided.
The distinction is in the addition of a "tags" field to the emitted series
data that contains a map of the GROUP BY tags along with their distinct
values associated with the data in the "values" field.
This required splitting the QueryExecutor into two query paths for InfluxQL
and SQL, as this allowed for handling InfluxQL query parsing in advance
of query planning.
A set of snapshot tests were added to check that it all works.
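
A rough sketch of the series shape this change emits for GROUP BY queries, with the added `tags` map alongside `columns` and `values`. The `Series` struct below is illustrative only (it mirrors the field names visible in the diff, not the server's actual types) and assumes `serde`/`serde_json`:

```rust
use serde::Serialize;
use serde_json::{json, Value};
use std::collections::HashMap;

// Illustrative shape of one entry in `results[].series[]` for a GROUP BY query.
#[derive(Serialize)]
struct Series {
    name: String,
    // Present only when the query groups by tags.
    #[serde(skip_serializing_if = "Option::is_none")]
    tags: Option<HashMap<String, Option<String>>>,
    columns: Vec<String>,
    values: Vec<Vec<Value>>,
}

fn main() {
    let series = Series {
        name: "bar".into(),
        tags: Some(HashMap::from([("t1".to_string(), Some("a".to_string()))])),
        columns: vec!["time".into(), "val".into()],
        values: vec![vec![json!("2065-01-07T17:28:51Z"), json!(1.0)]],
    };
    println!("{}", serde_json::to_string_pretty(&series).unwrap());
}
```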
|
diff --git a/Cargo.lock b/Cargo.lock
index d994b338d7..832a745d01 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2918,6 +2918,7 @@ version = "0.1.0"
dependencies = [
"async-trait",
"datafusion",
+ "influxdb_influxql_parser",
"iox_query",
"iox_query_params",
"thiserror 1.0.69",
@@ -3052,6 +3053,7 @@ dependencies = [
"influxdb3_telemetry",
"influxdb3_wal",
"influxdb3_write",
+ "influxdb_influxql_parser",
"iox_catalog",
"iox_http",
"iox_query",
@@ -3071,6 +3073,7 @@ dependencies = [
"pin-project-lite",
"pretty_assertions",
"pyo3",
+ "regex",
"schema",
"secrecy",
"serde",
@@ -6370,9 +6373,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
-version = "1.42.0"
+version = "1.43.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551"
+checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e"
dependencies = [
"backtrace",
"bytes",
@@ -6399,9 +6402,9 @@ dependencies = [
[[package]]
name = "tokio-macros"
-version = "2.4.0"
+version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752"
+checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
dependencies = [
"proc-macro2",
"quote",
diff --git a/Cargo.toml b/Cargo.toml
index 2b97362fa8..7bb9bda703 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -71,8 +71,8 @@ datafusion-proto = { git = "https://github.com/influxdata/arrow-datafusion.git",
dashmap = "6.1.0"
dotenvy = "0.15.7"
flate2 = "1.0.27"
-futures = "0.3.28"
-futures-util = "0.3.30"
+futures = "0.3.31"
+futures-util = "0.3.31"
hashbrown = { version = "0.15.1", features = ["serde"] }
hex = "0.4.3"
http = "0.2.9"
@@ -101,6 +101,7 @@ prost-build = "0.12.6"
prost-types = "0.12.6"
proptest = { version = "1", default-features = false, features = ["std"] }
rand = "0.8.5"
+regex = "1.11.1"
reqwest = { version = "0.11.27", default-features = false, features = ["rustls-tls", "stream", "json"] }
secrecy = "0.8.0"
serde = { version = "1.0", features = ["derive"] }
@@ -117,8 +118,8 @@ sysinfo = "0.30.8"
tempfile = "3.14.0"
test-log = { version = "0.2.16", features = ["trace"] }
thiserror = "1.0"
-tokio = { version = "1.42", features = ["full"] }
-tokio-util = "0.7.9"
+tokio = { version = "1.43", features = ["full"] }
+tokio-util = "0.7.13"
tonic = { version = "0.11.0", features = ["tls", "tls-roots"] }
tonic-build = "0.11.0"
tonic-health = "0.11.0"
diff --git a/influxdb3/tests/server/query.rs b/influxdb3/tests/server/query.rs
index 8d376f13f5..1cc4437f2e 100644
--- a/influxdb3/tests/server/query.rs
+++ b/influxdb3/tests/server/query.rs
@@ -1,3 +1,5 @@
+use core::str;
+
use crate::TestServer;
use futures::StreamExt;
use hyper::StatusCode;
@@ -1582,6 +1584,104 @@ async fn api_v1_query_uri_and_body() {
}
}
+#[tokio::test]
+async fn api_v1_query_group_by() {
+ let server = TestServer::spawn().await;
+
+ server
+ .write_lp_to_db(
+ "foo",
+ "\
+ bar,t1=a,t2=aa val=1 2998574931\n\
+ bar,t1=b,t2=aa val=2 2998574932\n\
+ bar,t1=a,t2=bb val=3 2998574933\n\
+ bar,t1=b,t2=bb val=4 2998574934",
+ Precision::Second,
+ )
+ .await
+ .unwrap();
+
+ for (chunked, query) in [
+ (false, "select * from bar group by t1"),
+ (true, "select * from bar group by t1"),
+ (false, "select * from bar group by t1, t2"),
+ (true, "select * from bar group by t1, t2"),
+ (false, "select * from bar group by /t/"),
+ (true, "select * from bar group by /t/"),
+ (false, "select * from bar group by /t[1]/"),
+ (true, "select * from bar group by /t[1]/"),
+ (false, "select * from bar group by /t[1,2]/"),
+ (true, "select * from bar group by /t[1,2]/"),
+ (false, "select * from bar group by t1, t2, t3"),
+ (true, "select * from bar group by t1, t2, t3"),
+ (false, "select * from bar group by *"),
+ (true, "select * from bar group by *"),
+ (false, "select * from bar group by /not_a_match/"),
+ (true, "select * from bar group by /not_a_match/"),
+ ] {
+ let params = vec![
+ ("db", "foo"),
+ ("q", query),
+ ("chunked", if chunked { "true" } else { "false" }),
+ ];
+ let stream = server.api_v1_query(&params, None).await.bytes_stream();
+ let values = stream
+ .map(|chunk| serde_json::from_slice(&chunk.unwrap()).unwrap())
+ .collect::<Vec<Value>>()
+ .await;
+ // Use a snapshot to assert on the output structure. This deserializes each emitted line
+ // as JSON first, then combines and collects them into a Vec<Value> to serialize into a JSON
+ // array for the snapshot.
+ insta::with_settings!({
+ description => format!("query: {query}, chunked: {chunked}"),
+ }, {
+ insta::assert_json_snapshot!(values);
+ });
+ }
+}
+
+#[tokio::test]
+async fn api_v1_query_group_by_with_nulls() {
+ let server = TestServer::spawn().await;
+
+ server
+ .write_lp_to_db(
+ "foo",
+ "\
+ bar,t1=a val=1 2998574931\n\
+ bar val=2 2998574932\n\
+ bar,t1=a val=3 2998574933\n\
+ ",
+ Precision::Second,
+ )
+ .await
+ .unwrap();
+
+ for (chunked, query) in [
+ (false, "select * from bar group by t1"),
+ (true, "select * from bar group by t1"),
+ ] {
+ let params = vec![
+ ("db", "foo"),
+ ("q", query),
+ ("chunked", if chunked { "true" } else { "false" }),
+ ];
+ let stream = server.api_v1_query(&params, None).await.bytes_stream();
+ let values = stream
+ .map(|chunk| serde_json::from_slice(&chunk.unwrap()).unwrap())
+ .collect::<Vec<Value>>()
+ .await;
+ // Use a snapshot to assert on the output structure. This deserializes each emitted line
+ // as JSON first, then combines and collects them into a Vec<Value> to serialize into a JSON
+ // array for the snapshot.
+ insta::with_settings!({
+ description => format!("query: {query}, chunked: {chunked}"),
+ }, {
+ insta::assert_json_snapshot!(values);
+ });
+ }
+}
+
#[tokio::test]
async fn api_v3_query_sql_distinct_cache() {
let server = TestServer::spawn().await;
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-10.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-10.snap
new file mode 100644
index 0000000000..f9555d9337
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-10.snap
@@ -0,0 +1,111 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by /t[1,2]/, chunked: true"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "aa"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ 1.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "bb"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:53Z",
+ 3.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "aa"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:52Z",
+ 2.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "bb"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:54Z",
+ 4.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-11.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-11.snap
new file mode 100644
index 0000000000..8022b0794c
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-11.snap
@@ -0,0 +1,88 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by t1, t2, t3, chunked: false"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "bb",
+ "t3": ""
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:54Z",
+ 4.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "aa",
+ "t3": ""
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:52Z",
+ 2.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "bb",
+ "t3": ""
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:53Z",
+ 3.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "aa",
+ "t3": ""
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ 1.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-12.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-12.snap
new file mode 100644
index 0000000000..76f9493fe0
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-12.snap
@@ -0,0 +1,115 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by t1, t2, t3, chunked: true"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "aa",
+ "t3": ""
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ 1.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "bb",
+ "t3": ""
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:53Z",
+ 3.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "aa",
+ "t3": ""
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:52Z",
+ 2.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "bb",
+ "t3": ""
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:54Z",
+ 4.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-13.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-13.snap
new file mode 100644
index 0000000000..266739bb0e
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-13.snap
@@ -0,0 +1,84 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by *, chunked: false"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "bb"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:54Z",
+ 4.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "aa"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:52Z",
+ 2.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "bb"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:53Z",
+ 3.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "aa"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ 1.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-14.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-14.snap
new file mode 100644
index 0000000000..8fa39fee9a
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-14.snap
@@ -0,0 +1,111 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by *, chunked: true"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "aa"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ 1.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "bb"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:53Z",
+ 3.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "aa"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:52Z",
+ 2.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "bb"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:54Z",
+ 4.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-15.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-15.snap
new file mode 100644
index 0000000000..ab892c1d0d
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-15.snap
@@ -0,0 +1,41 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by /not_a_match/, chunked: false"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ 1.0
+ ],
+ [
+ "2065-01-07T17:28:52Z",
+ 2.0
+ ],
+ [
+ "2065-01-07T17:28:53Z",
+ 3.0
+ ],
+ [
+ "2065-01-07T17:28:54Z",
+ 4.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-16.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-16.snap
new file mode 100644
index 0000000000..e5136b02d7
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-16.snap
@@ -0,0 +1,41 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by /not_a_match/, chunked: true"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ 1.0
+ ],
+ [
+ "2065-01-07T17:28:52Z",
+ 2.0
+ ],
+ [
+ "2065-01-07T17:28:53Z",
+ 3.0
+ ],
+ [
+ "2065-01-07T17:28:54Z",
+ 4.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-2.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-2.snap
new file mode 100644
index 0000000000..5c862a5535
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-2.snap
@@ -0,0 +1,71 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by t1, chunked: true"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "t2",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ "aa",
+ 1.0
+ ],
+ [
+ "2065-01-07T17:28:53Z",
+ "bb",
+ 3.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "t2",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:52Z",
+ "aa",
+ 2.0
+ ],
+ [
+ "2065-01-07T17:28:54Z",
+ "bb",
+ 4.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-3.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-3.snap
new file mode 100644
index 0000000000..f6e395addd
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-3.snap
@@ -0,0 +1,84 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by t1, t2, chunked: false"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "bb"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:54Z",
+ 4.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "aa"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:52Z",
+ 2.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "bb"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:53Z",
+ 3.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "aa"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ 1.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-4.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-4.snap
new file mode 100644
index 0000000000..783b65cc02
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-4.snap
@@ -0,0 +1,111 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by t1, t2, chunked: true"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "aa"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ 1.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "bb"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:53Z",
+ 3.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "aa"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:52Z",
+ 2.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "bb"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:54Z",
+ 4.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-5.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-5.snap
new file mode 100644
index 0000000000..0ee623c4fc
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-5.snap
@@ -0,0 +1,84 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by /t/, chunked: false"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "bb"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:54Z",
+ 4.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "aa"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:52Z",
+ 2.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "bb"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:53Z",
+ 3.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "aa"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ 1.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-6.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-6.snap
new file mode 100644
index 0000000000..ea8fd5ba59
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-6.snap
@@ -0,0 +1,111 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by /t/, chunked: true"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "aa"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ 1.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "bb"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:53Z",
+ 3.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "aa"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:52Z",
+ 2.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "bb"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:54Z",
+ 4.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-7.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-7.snap
new file mode 100644
index 0000000000..8d0641f5c4
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-7.snap
@@ -0,0 +1,56 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by /t[1]/, chunked: false"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:52Z",
+ 2.0
+ ],
+ [
+ "2065-01-07T17:28:54Z",
+ 4.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ 1.0
+ ],
+ [
+ "2065-01-07T17:28:53Z",
+ 3.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-8.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-8.snap
new file mode 100644
index 0000000000..be3ec2a5c0
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-8.snap
@@ -0,0 +1,65 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by /t[1]/, chunked: true"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ 1.0
+ ],
+ [
+ "2065-01-07T17:28:53Z",
+ 3.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:52Z",
+ 2.0
+ ],
+ [
+ "2065-01-07T17:28:54Z",
+ 4.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-9.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-9.snap
new file mode 100644
index 0000000000..266739bb0e
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by-9.snap
@@ -0,0 +1,84 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by *, chunked: false"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "bb"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:54Z",
+ 4.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b",
+ "t2": "aa"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:52Z",
+ 2.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "bb"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:53Z",
+ 3.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a",
+ "t2": "aa"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ 1.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by.snap
new file mode 100644
index 0000000000..2d94ee4a1c
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by.snap
@@ -0,0 +1,62 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by t1, chunked: false"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "t2",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "b"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:52Z",
+ "aa",
+ 2.0
+ ],
+ [
+ "2065-01-07T17:28:54Z",
+ "bb",
+ 4.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "t2",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ "aa",
+ 1.0
+ ],
+ [
+ "2065-01-07T17:28:53Z",
+ "bb",
+ 3.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by_with_nulls-2.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by_with_nulls-2.snap
new file mode 100644
index 0000000000..26d347023f
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by_with_nulls-2.snap
@@ -0,0 +1,61 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by t1, chunked: true"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": ""
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:52Z",
+ 2.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ },
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ 1.0
+ ],
+ [
+ "2065-01-07T17:28:53Z",
+ 3.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by_with_nulls.snap b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by_with_nulls.snap
new file mode 100644
index 0000000000..e938aa2728
--- /dev/null
+++ b/influxdb3/tests/server/snapshots/server__query__api_v1_query_group_by_with_nulls.snap
@@ -0,0 +1,52 @@
+---
+source: influxdb3/tests/server/query.rs
+description: "query: select * from bar group by t1, chunked: false"
+expression: values
+---
+[
+ {
+ "results": [
+ {
+ "series": [
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": "a"
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:51Z",
+ 1.0
+ ],
+ [
+ "2065-01-07T17:28:53Z",
+ 3.0
+ ]
+ ]
+ },
+ {
+ "columns": [
+ "time",
+ "val"
+ ],
+ "name": "bar",
+ "tags": {
+ "t1": ""
+ },
+ "values": [
+ [
+ "2065-01-07T17:28:52Z",
+ 2.0
+ ]
+ ]
+ }
+ ],
+ "statement_id": 0
+ }
+ ]
+ }
+]
diff --git a/influxdb3_internal_api/Cargo.toml b/influxdb3_internal_api/Cargo.toml
index e2259cf5c1..affafefcb9 100644
--- a/influxdb3_internal_api/Cargo.toml
+++ b/influxdb3_internal_api/Cargo.toml
@@ -7,6 +7,7 @@ license.workspace = true
[dependencies]
# Core Crates
+influxdb_influxql_parser.workspace = true
iox_query.workspace = true
iox_query_params.workspace = true
trace.workspace = true
diff --git a/influxdb3_internal_api/src/query_executor.rs b/influxdb3_internal_api/src/query_executor.rs
index 6a44966b7c..d9fcaef53a 100644
--- a/influxdb3_internal_api/src/query_executor.rs
+++ b/influxdb3_internal_api/src/query_executor.rs
@@ -2,6 +2,7 @@ use async_trait::async_trait;
use datafusion::arrow::error::ArrowError;
use datafusion::common::DataFusionError;
use datafusion::execution::SendableRecordBatchStream;
+use influxdb_influxql_parser::statement::Statement;
use iox_query::query_log::QueryLogEntries;
use iox_query::{QueryDatabase, QueryNamespace};
use iox_query_params::StatementParams;
@@ -30,12 +31,21 @@ pub enum QueryExecutorError {
#[async_trait]
pub trait QueryExecutor: QueryDatabase + Debug + Send + Sync + 'static {
- async fn query(
+ async fn query_sql(
&self,
database: &str,
q: &str,
params: Option<StatementParams>,
- kind: QueryKind,
+ span_ctx: Option<SpanContext>,
+ external_span_ctx: Option<RequestLogContext>,
+ ) -> Result<SendableRecordBatchStream, QueryExecutorError>;
+
+ async fn query_influxql(
+ &self,
+ database_name: &str,
+ query_str: &str,
+ influxql_statement: Statement,
+ params: Option<StatementParams>,
span_ctx: Option<SpanContext>,
external_span_ctx: Option<RequestLogContext>,
) -> Result<SendableRecordBatchStream, QueryExecutorError>;
@@ -54,21 +64,6 @@ pub trait QueryExecutor: QueryDatabase + Debug + Send + Sync + 'static {
fn upcast(&self) -> Arc<(dyn QueryDatabase + 'static)>;
}
-#[derive(Debug, Clone, Copy)]
-pub enum QueryKind {
- Sql,
- InfluxQl,
-}
-
-impl QueryKind {
- pub fn query_type(&self) -> &'static str {
- match self {
- Self::Sql => "sql",
- Self::InfluxQl => "influxql",
- }
- }
-}
-
#[derive(Debug, Copy, Clone)]
pub struct UnimplementedQueryExecutor;
@@ -97,27 +92,34 @@ impl QueryDatabase for UnimplementedQueryExecutor {
#[async_trait]
impl QueryExecutor for UnimplementedQueryExecutor {
- async fn query(
+ async fn query_sql(
&self,
_database: &str,
_q: &str,
_params: Option<StatementParams>,
- _kind: QueryKind,
_span_ctx: Option<SpanContext>,
_external_span_ctx: Option<RequestLogContext>,
) -> Result<SendableRecordBatchStream, QueryExecutorError> {
- Err(QueryExecutorError::DatabaseNotFound {
- db_name: "unimplemented".to_string(),
- })
+ Err(QueryExecutorError::MethodNotImplemented("query_sql"))
+ }
+
+ async fn query_influxql(
+ &self,
+ _database_name: &str,
+ _query_str: &str,
+ _influxql_statement: Statement,
+ _params: Option<StatementParams>,
+ _span_ctx: Option<SpanContext>,
+ _external_span_ctx: Option<RequestLogContext>,
+ ) -> Result<SendableRecordBatchStream, QueryExecutorError> {
+ Err(QueryExecutorError::MethodNotImplemented("query_influxql"))
}
fn show_databases(
&self,
_include_deleted: bool,
) -> Result<SendableRecordBatchStream, QueryExecutorError> {
- Err(QueryExecutorError::DatabaseNotFound {
- db_name: "unimplemented".to_string(),
- })
+ Err(QueryExecutorError::MethodNotImplemented("show_databases"))
}
async fn show_retention_policies(
@@ -125,9 +127,9 @@ impl QueryExecutor for UnimplementedQueryExecutor {
_database: Option<&str>,
_span_ctx: Option<SpanContext>,
) -> Result<SendableRecordBatchStream, QueryExecutorError> {
- Err(QueryExecutorError::DatabaseNotFound {
- db_name: "unimplemented".to_string(),
- })
+ Err(QueryExecutorError::MethodNotImplemented(
+ "show_retention_policies",
+ ))
}
fn upcast(&self) -> Arc<(dyn QueryDatabase + 'static)> {
diff --git a/influxdb3_py_api/src/system_py.rs b/influxdb3_py_api/src/system_py.rs
index 792c12a2a8..db2334307b 100644
--- a/influxdb3_py_api/src/system_py.rs
+++ b/influxdb3_py_api/src/system_py.rs
@@ -8,7 +8,7 @@ use futures::TryStreamExt;
use hashbrown::HashMap;
use influxdb3_catalog::catalog::DatabaseSchema;
use influxdb3_id::TableId;
-use influxdb3_internal_api::query_executor::{QueryExecutor, QueryKind};
+use influxdb3_internal_api::query_executor::QueryExecutor;
use influxdb3_wal::{FieldData, WriteBatch};
use iox_query_params::StatementParams;
use observability_deps::tracing::{error, info, warn};
@@ -139,14 +139,7 @@ impl PyPluginCallApi {
// Spawn the async task
let handle = tokio::spawn(async move {
let res = query_executor
- .query(
- db_schema_name.as_ref(),
- &query,
- params,
- QueryKind::Sql,
- None,
- None,
- )
+ .query_sql(db_schema_name.as_ref(), &query, params, None, None)
.await
.map_err(|e| PyValueError::new_err(format!("Error executing query: {}", e)))?;
diff --git a/influxdb3_server/Cargo.toml b/influxdb3_server/Cargo.toml
index 5e3279a213..7bbdc2f34d 100644
--- a/influxdb3_server/Cargo.toml
+++ b/influxdb3_server/Cargo.toml
@@ -11,6 +11,7 @@ authz.workspace = true
data_types.workspace = true
datafusion_util.workspace = true
influxdb-line-protocol.workspace = true
+influxdb_influxql_parser.workspace = true
iox_catalog.workspace = true
iox_http.workspace = true
iox_query.workspace = true
@@ -67,6 +68,7 @@ mime.workspace = true
object_store.workspace = true
parking_lot.workspace = true
pin-project-lite.workspace = true
+regex.workspace = true
secrecy.workspace = true
serde.workspace = true
serde_json.workspace = true
diff --git a/influxdb3_server/src/http.rs b/influxdb3_server/src/http.rs
index 1f04678970..baf80ad9ad 100644
--- a/influxdb3_server/src/http.rs
+++ b/influxdb3_server/src/http.rs
@@ -23,7 +23,7 @@ use hyper::{Body, Method, Request, Response, StatusCode};
use influxdb3_cache::distinct_cache::{self, CreateDistinctCacheArgs, MaxAge, MaxCardinality};
use influxdb3_cache::last_cache;
use influxdb3_catalog::catalog::Error as CatalogError;
-use influxdb3_internal_api::query_executor::{QueryExecutor, QueryExecutorError, QueryKind};
+use influxdb3_internal_api::query_executor::{QueryExecutor, QueryExecutorError};
use influxdb3_process::{INFLUXDB3_GIT_HASH_SHORT, INFLUXDB3_VERSION};
use influxdb3_processing_engine::manager::ProcessingEngineManager;
use influxdb3_wal::{PluginType, TriggerSpecificationDefinition};
@@ -32,6 +32,8 @@ use influxdb3_write::write_buffer::Error as WriteBufferError;
use influxdb3_write::BufferedWriteRequest;
use influxdb3_write::Precision;
use influxdb3_write::WriteBuffer;
+use influxdb_influxql_parser::select::GroupByClause;
+use influxdb_influxql_parser::statement::Statement;
use iox_http::write::single_tenant::SingleTenantRequestUnifier;
use iox_http::write::v1::V1_NAMESPACE_RP_SEPARATOR;
use iox_http::write::{WriteParseError, WriteRequestUnifier};
@@ -547,7 +549,7 @@ where
let stream = self
.query_executor
- .query(&database, &query_str, params, QueryKind::Sql, None, None)
+ .query_sql(&database, &query_str, params, None, None)
.await?;
Response::builder()
@@ -567,7 +569,7 @@ where
info!(?database, %query_str, ?format, "handling query_influxql");
- let stream = self
+ let (stream, _) = self
.query_influxql_inner(database, &query_str, params)
.await?;
@@ -733,7 +735,7 @@ where
database: Option<String>,
query_str: &str,
params: Option<StatementParams>,
- ) -> Result<SendableRecordBatchStream> {
+ ) -> Result<(SendableRecordBatchStream, Option<GroupByClause>)> {
let mut statements = rewrite::parse_statements(query_str)?;
if statements.len() != 1 {
@@ -756,31 +758,29 @@ where
}
};
- if statement.statement().is_show_databases() {
- self.query_executor.show_databases(true)
- } else if statement.statement().is_show_retention_policies() {
+ let statement = statement.to_statement();
+ let group_by = match &statement {
+ Statement::Select(select_statement) => select_statement.group_by.clone(),
+ _ => None,
+ };
+
+ let stream = if statement.is_show_databases() {
+ self.query_executor.show_databases(true)?
+ } else if statement.is_show_retention_policies() {
self.query_executor
.show_retention_policies(database.as_deref(), None)
- .await
+ .await?
} else {
let Some(database) = database else {
return Err(Error::InfluxqlNoDatabase);
};
self.query_executor
- .query(
- &database,
- // TODO - implement an interface that takes the statement directly,
- // so we don't need to double down on the parsing
- &statement.to_statement().to_string(),
- params,
- QueryKind::InfluxQl,
- None,
- None,
- )
- .await
- }
- .map_err(Into::into)
+ .query_influxql(&database, query_str, statement, params, None, None)
+ .await?
+ };
+
+ Ok((stream, group_by))
}
/// Create a new distinct value cache given the [`DistinctCacheCreateRequest`] arguments in the request
diff --git a/influxdb3_server/src/http/v1.rs b/influxdb3_server/src/http/v1.rs
index b4b915ea0d..73fcf0b474 100644
--- a/influxdb3_server/src/http/v1.rs
+++ b/influxdb3_server/src/http/v1.rs
@@ -1,5 +1,6 @@
use std::{
collections::{HashMap, VecDeque},
+ ops::{Deref, DerefMut},
pin::Pin,
sync::Arc,
task::{Context, Poll},
@@ -17,15 +18,18 @@ use arrow::{
record_batch::RecordBatch,
};
+use arrow_schema::{Field, SchemaRef};
use bytes::Bytes;
use chrono::{format::SecondsFormat, DateTime};
use datafusion::physical_plan::SendableRecordBatchStream;
use futures::{ready, stream::Fuse, Stream, StreamExt};
use hyper::http::HeaderValue;
use hyper::{header::ACCEPT, header::CONTENT_TYPE, Body, Request, Response, StatusCode};
+use influxdb_influxql_parser::select::{Dimension, GroupByClause};
use iox_time::TimeProvider;
use observability_deps::tracing::info;
-use schema::{INFLUXQL_MEASUREMENT_COLUMN_NAME, TIME_COLUMN_NAME};
+use regex::Regex;
+use schema::{InfluxColumnType, INFLUXQL_MEASUREMENT_COLUMN_NAME, TIME_COLUMN_NAME};
use serde::{Deserialize, Serialize};
use serde_json::Value;
@@ -73,9 +77,9 @@ where
// TODO - Currently not supporting parameterized queries, see
// https://github.com/influxdata/influxdb/issues/24805
- let stream = self.query_influxql_inner(database, &query, None).await?;
- let stream =
- QueryResponseStream::new(0, stream, chunk_size, format, epoch).map_err(QueryError)?;
+ let (stream, group_by) = self.query_influxql_inner(database, &query, None).await?;
+ let stream = QueryResponseStream::new(0, stream, chunk_size, format, epoch, group_by)
+ .map_err(QueryError)?;
let body = Body::wrap_stream(stream);
Ok(Response::builder()
@@ -249,7 +253,7 @@ enum Precision {
/// [`anyhow::Error`] is used as a catch-all because if anything fails during
/// that process it will result in a 500 INTERNAL ERROR.
#[derive(Debug, thiserror::Error)]
-#[error("unexpected query error: {0}")]
+#[error("unexpected query error: {0:#}")]
pub struct QueryError(#[from] anyhow::Error);
/// The response structure returned by the v1 query API
@@ -354,6 +358,8 @@ struct StatementResponse {
#[derive(Debug, Serialize)]
struct Series {
name: String,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ tags: Option<HashMap<String, Option<String>>>,
columns: Vec<String>,
values: Vec<Row>,
}
@@ -362,14 +368,29 @@ struct Series {
#[derive(Debug, Serialize)]
struct Row(Vec<Value>);
+impl Deref for Row {
+ type Target = Vec<Value>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl DerefMut for Row {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
/// A buffer for storing records from a stream of [`RecordBatch`]es
///
/// The optional `size` indicates whether this is operating in `chunked` mode (see
/// [`QueryResponseStream`]), and when specified, gives the size of chunks that will
/// be emitted.
+#[derive(Debug)]
struct ChunkBuffer {
size: Option<usize>,
- series: VecDeque<(String, Vec<Row>)>,
+ series: VecDeque<(String, BufferGroupByTagSet, Vec<Row>)>,
}
impl ChunkBuffer {
@@ -382,27 +403,80 @@ impl ChunkBuffer {
/// Get the name of the current measurement [`Series`] being streamed
fn current_measurement_name(&self) -> Option<&str> {
- self.series.front().map(|(n, _)| n.as_str())
+ self.series.front().map(|(n, _, _)| n.as_str())
}
/// For queries that produce multiple [`Series`], this will be called when
/// the current series is completed streaming
fn push_next_measurement<S: Into<String>>(&mut self, name: S) {
- self.series.push_front((name.into(), vec![]));
+ self.series.push_front((name.into(), None, vec![]));
}
/// Push a new [`Row`] into the current measurement [`Series`]
- fn push_row(&mut self, row: Row) -> Result<(), anyhow::Error> {
- self.series
- .front_mut()
- .context("tried to push row with no measurements buffered")?
- .1
- .push(row);
+ ///
+ /// If the stream is producing tags that are part of a `GROUP BY` clause, then `group_by` should
+ /// hold a map of those tag keys to tag values for the given row.
+ fn push_row(
+ &mut self,
+ group_by: Option<HashMap<String, Option<String>>>,
+ row: Row,
+ ) -> Result<(), anyhow::Error> {
+ let (_, tags, rows) = self
+ .series
+ .front()
+ .context("tried to push row with no measurements buffered")?;
+
+ // Usually series are split on the measurement name. This function is not concerned with
+ // that split, as the caller does that. However, if we are processing a query with a `GROUP BY`
+ // clause, then we make the decision here. If the incoming `group_by` tag key/value pairs do
+ // not match those for the current row set, then we need to start a new entry in the
+ // `series` on the chunk buffer.
+ use BufferGroupByDecision::*;
+ let group_by_decision = match (tags, &group_by) {
+ (None, None) => NotAGroupBy,
+ (Some(tags), Some(group_by)) => {
+ if group_by.len() == tags.len() {
+ if group_by == tags {
+ NewRowInExistingSet
+ } else {
+ NewSet
+ }
+ } else {
+ bail!(
+ "group by columns in query result and chunk buffer are not the same size"
+ );
+ }
+ }
+ (None, Some(_)) => {
+ if rows.is_empty() {
+ FirstRowInSeries
+ } else {
+ bail!("received inconsistent group by tags in query result");
+ }
+ }
+ (Some(_), None) => bail!(
+ "chunk buffer expects group by tags but none were present in the query result"
+ ),
+ };
+
+ match group_by_decision {
+ NotAGroupBy | NewRowInExistingSet => self.series.front_mut().unwrap().2.push(row),
+ FirstRowInSeries => {
+ let (_, tags, rows) = self.series.front_mut().unwrap();
+ *tags = group_by;
+ rows.push(row);
+ }
+ NewSet => {
+ let name = self.series.front().unwrap().0.clone();
+ self.series.push_front((name, group_by, vec![row]));
+ }
+ }
+
Ok(())
}
/// Flush a single chunk from the [`ChunkBuffer`], if possible
- fn flush_one(&mut self) -> Option<(String, Vec<Row>)> {
+ fn flush_one(&mut self) -> Option<(String, BufferGroupByTagSet, Vec<Row>)> {
if !self.can_flush() {
return None;
}
@@ -411,23 +485,23 @@ impl ChunkBuffer {
if self
.series
.back()
- .is_some_and(|(_, rows)| rows.len() <= size)
+ .is_some_and(|(_, _, rows)| rows.len() <= size)
{
// the back series is smaller than the chunk size, so we just
// pop and take the whole thing:
self.series.pop_back()
} else {
// only drain a chunk's worth from the back series:
- self.series
- .back_mut()
- .map(|(name, rows)| (name.to_owned(), rows.drain(..size).collect()))
+ self.series.back_mut().map(|(name, tags, rows)| {
+ (name.to_owned(), tags.clone(), rows.drain(..size).collect())
+ })
}
}
/// The [`ChunkBuffer`] is operating in chunked mode, and can flush a chunk
fn can_flush(&self) -> bool {
if let (Some(size), Some(m)) = (self.size, self.series.back()) {
- m.1.len() >= size || self.series.len() > 1
+ m.2.len() >= size || self.series.len() > 1
} else {
false
}
@@ -439,6 +513,24 @@ impl ChunkBuffer {
}
}
+/// Convenience type for representing an optional map of tag name to optional tag values
+type BufferGroupByTagSet = Option<HashMap<String, Option<String>>>;
+
+/// Decide how to handle an incoming set of `GROUP BY` tag key value pairs when pushing a row into
+/// the `ChunkBuffer`
+enum BufferGroupByDecision {
+ /// The query is not using a `GROUP BY` with tags
+ NotAGroupBy,
+ /// This is the first time a row has been pushed to the series with this `GROUP BY` tag
+ /// key/value combination
+ FirstRowInSeries,
+ /// Still adding rows to the current set of `GROUP BY` tag key/value pairs
+ NewRowInExistingSet,
+ /// The incoming set of `GROUP BY` tag key/value pairs does not match, so we need to start a
+ /// new row set in the series.
+ NewSet,
+}
+
/// The state of the [`QueryResponseStream`]
enum State {
/// The initial state of the stream; no query results have been streamed
@@ -473,7 +565,7 @@ impl State {
struct QueryResponseStream {
buffer: ChunkBuffer,
input: Fuse<SendableRecordBatchStream>,
- column_map: HashMap<String, usize>,
+ column_map: ColumnMap,
statement_id: usize,
format: QueryFormat,
epoch: Option<Precision>,
@@ -490,22 +582,11 @@ impl QueryResponseStream {
chunk_size: Option<usize>,
format: QueryFormat,
epoch: Option<Precision>,
+ group_by_clause: Option<GroupByClause>,
) -> Result<Self, anyhow::Error> {
- let buffer = ChunkBuffer::new(chunk_size);
let schema = input.schema();
- let column_map = schema
- .fields
- .iter()
- .map(|f| f.name().to_owned())
- .enumerate()
- .flat_map(|(i, n)| {
- if n != INFLUXQL_MEASUREMENT_COLUMN_NAME && i > 0 {
- Some((n, i - 1))
- } else {
- None
- }
- })
- .collect();
+ let buffer = ChunkBuffer::new(chunk_size);
+ let column_map = ColumnMap::new(schema, group_by_clause)?;
Ok(Self {
buffer,
column_map,
@@ -543,7 +624,8 @@ impl QueryResponseStream {
let schema = batch.schema();
for row_index in 0..batch.num_rows() {
- let mut row = vec![Value::Null; column_map.len()];
+ let mut row = vec![Value::Null; column_map.row_size()];
+ let mut tags = None;
for (col_index, column) in columns.iter().enumerate() {
let field = schema.field(col_index);
@@ -577,32 +659,43 @@ impl QueryResponseStream {
cell_value = convert_ns_epoch(cell_value, precision)?
}
}
- let col_position = column_map
- .get(column_name)
- .context("failed to retrieve column position")?;
- row[*col_position] = cell_value;
+ if let Some(index) = column_map.as_row_index(column_name) {
+ row[index] = cell_value;
+ } else if column_map.is_group_by_tag(column_name) {
+ let tag_val = match cell_value {
+ Value::Null => None,
+ Value::String(s) => Some(s),
+ other => bail!(
+ "tag column {column_name} expected as a string or null, got {other:?}"
+ ),
+ };
+ tags.get_or_insert_with(HashMap::new)
+ .insert(column_name.to_string(), tag_val);
+ } else if column_map.is_orphan_group_by_tag(column_name) {
+ tags.get_or_insert_with(HashMap::new)
+ .insert(column_name.to_string(), Some(String::default()));
+ } else {
+ bail!("failed to retrieve column position for column with name {column_name}");
+ }
}
- self.buffer.push_row(Row(row))?;
+ self.buffer.push_row(tags.take(), Row(row))?;
}
Ok(())
}
- fn columns(&self) -> Vec<String> {
- let mut columns = vec!["".to_string(); self.column_map.len()];
- self.column_map
- .iter()
- .for_each(|(k, i)| k.clone_into(&mut columns[*i]));
- columns
+ fn column_names(&self) -> Vec<String> {
+ self.column_map.row_column_names()
}
/// Flush a single chunk, or time series, when operating in chunked mode
fn flush_one(&mut self) -> QueryResponse {
- let columns = self.columns();
+ let columns = self.column_names();
// this unwrap is okay because we only ever call flush_one
// after calling can_flush on the buffer:
- let (name, values) = self.buffer.flush_one().unwrap();
+ let (name, tags, values) = self.buffer.flush_one().unwrap();
let series = vec![Series {
name,
+ tags,
columns,
values,
}];
@@ -618,13 +711,14 @@ impl QueryResponseStream {
/// Flush the entire buffer
fn flush_all(&mut self) -> QueryResponse {
- let columns = self.columns();
+ let columns = self.column_names();
let series = self
.buffer
.series
.drain(..)
- .map(|(name, values)| Series {
+ .map(|(name, tags, values)| Series {
name,
+ tags,
columns: columns.clone(),
values,
})
@@ -771,6 +865,191 @@ fn cast_column_value(column: &ArrayRef, row_index: usize) -> Result<Value, anyho
Ok(value)
}
+/// Map column names to their respective [`ColumnType`]
+struct ColumnMap {
+ /// The map of column names to column types
+ map: HashMap<String, ColumnType>,
+ /// How many columns are in the `values` set, i.e., that are not `GROUP BY` tags
+ row_size: usize,
+}
+
+/// A column's type in the context of a v1 /query API response
+enum ColumnType {
+ /// A value to be included in the `series.[].values` array, at the given `index`
+ Value { index: usize },
+ /// A tag that is part of the `GROUP BY` clause, either explicitly, or by a regex/wildcard match
+ /// and is included in the `series.[].tags` map
+ GroupByTag,
+ /// This is a case where a GROUP BY clause contains a field which doesn't exist in the table
+ ///
+ /// For example,
+ /// ```text
+ /// select * from foo group by t1, t2
+ /// ```
+ /// If `t1` is a tag in the table, but `t2` is not, nor is a field in the table, then the v1
+ /// /query API response will include `t2` in the `series.[].tags` property in the results with
+ /// an empty string for a value (`""`).
+ OrphanGroupByTag,
+}
+
+impl ColumnMap {
+ /// Create a new `ColumnMap`
+ fn new(
+ schema: SchemaRef,
+ group_by_clause: Option<GroupByClause>,
+ ) -> Result<Self, anyhow::Error> {
+ let mut map = HashMap::new();
+ let group_by = if let Some(clause) = group_by_clause {
+ GroupByEval::from_clause(clause)?
+ } else {
+ None
+ };
+ let mut index = 0;
+ for field in schema
+ .fields()
+ .into_iter()
+ .filter(|f| f.name() != INFLUXQL_MEASUREMENT_COLUMN_NAME)
+ {
+ if group_by
+ .as_ref()
+ .is_some_and(|gb| is_tag(field) && gb.evaluate_tag(field.name()))
+ {
+ map.insert(field.name().to_string(), ColumnType::GroupByTag);
+ } else if group_by.as_ref().is_some_and(|gb| {
+ field.metadata().is_empty() && gb.contains_explicit_col_name(field.name())
+ }) {
+ map.insert(field.name().to_string(), ColumnType::OrphanGroupByTag);
+ } else {
+ map.insert(field.name().to_string(), ColumnType::Value { index });
+ index += 1;
+ }
+ }
+ Ok(Self {
+ map,
+ row_size: index,
+ })
+ }
+
+ fn row_size(&self) -> usize {
+ self.row_size
+ }
+
+ fn row_column_names(&self) -> Vec<String> {
+ let mut result = vec![None; self.row_size];
+ self.map.iter().for_each(|(name, c)| {
+ if let ColumnType::Value { index } = c {
+ result[*index].replace(name.to_owned());
+ }
+ });
+ result.into_iter().flatten().collect()
+ }
+
+ /// If this column is part of the `values` row data, get its index, or `None` otherwise
+ fn as_row_index(&self, column_name: &str) -> Option<usize> {
+ self.map.get(column_name).and_then(|col| match col {
+ ColumnType::Value { index } => Some(*index),
+ ColumnType::GroupByTag | ColumnType::OrphanGroupByTag => None,
+ })
+ }
+
+ /// This column is a `GROUP BY` tag
+ fn is_group_by_tag(&self, column_name: &str) -> bool {
+ self.map
+ .get(column_name)
+ .is_some_and(|col| matches!(col, ColumnType::GroupByTag))
+ }
+
+ /// This column is an orphan `GROUP BY` tag
+ fn is_orphan_group_by_tag(&self, column_name: &str) -> bool {
+ self.map
+ .get(column_name)
+ .is_some_and(|col| matches!(col, ColumnType::OrphanGroupByTag))
+ }
+}
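For illustration, a hedged sketch of how the classification above is meant to be read (assuming a query like `SELECT * FROM foo GROUP BY t1, t2` where `t1` is a real tag, `t2` exists only in the `GROUP BY`, and `schema` / `group_by_clause` are already in scope):

```rust
let map = ColumnMap::new(schema, group_by_clause)?;
assert!(map.is_group_by_tag("t1"));           // real tag matched by the GROUP BY
assert!(map.is_orphan_group_by_tag("t2"));    // GROUP BY name with no backing column
assert!(map.as_row_index("time").is_some());  // non-tag columns land in the `values` rows
assert_eq!(map.row_size(), map.row_column_names().len());
```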
+
+// TODO: this is defined in schema crate, so needs to be made pub there:
+const COLUMN_METADATA_KEY: &str = "iox::column::type";
+
+/// Decide based on metadata if this [`Field`] is a tag column
+fn is_tag(field: &Arc<Field>) -> bool {
+ field
+ .metadata()
+ .get(COLUMN_METADATA_KEY)
+ .map(|s| InfluxColumnType::try_from(s.as_str()))
+ .transpose()
+ .ok()
+ .flatten()
+ .is_some_and(|t| matches!(t, InfluxColumnType::Tag))
+}
+
+/// Derived from a [`GroupByClause`] and used to evaluate whether a given tag column is part of the
+/// `GROUP BY` clause in an InfluxQL query
+struct GroupByEval(Vec<GroupByEvalType>);
+
+/// The kind of `GROUP BY` evaluator
+enum GroupByEvalType {
+ /// An explicit tag name in a `GROUP BY` clause, e.g., `GROUP BY t1, t2`
+ Tag(String),
+ /// A regex in a `GROUP BY` that could match 0-or-more tags, e.g., `GROUP BY /t[1,2]/`
+ Regex(Regex),
+ /// A wildcard that matches all tags, e.g., `GROUP BY *`
+ Wildcard,
+}
+
+impl GroupByEval {
+ /// Convert a [`GroupByClause`] to a [`GroupByEval`] if any of its members match on tag columns
+ ///
+ /// This will produce an error if an invalid regex is provided as one of the `GROUP BY` clauses.
+ /// That will likely be caught upstream during query parsing, but handle it here anyway.
+ fn from_clause(clause: GroupByClause) -> Result<Option<Self>, anyhow::Error> {
+ let v = clause
+ .iter()
+ .filter_map(|dim| match dim {
+ Dimension::Time(_) => None,
+ Dimension::VarRef(tag) => Some(Ok(GroupByEvalType::Tag(tag.to_string()))),
+ Dimension::Regex(regex) => Some(
+ Regex::new(regex.as_str())
+ .map(GroupByEvalType::Regex)
+ .context("invalid regex in group by clause"),
+ ),
+ Dimension::Wildcard => Some(Ok(GroupByEvalType::Wildcard)),
+ })
+ .collect::<Result<Vec<GroupByEvalType>, anyhow::Error>>()?;
+
+ if v.is_empty() {
+ Ok(None)
+ } else {
+ Ok(Some(Self(v)))
+ }
+ }
+
+ /// Check if a tag is matched by this set of `GROUP BY` clauses
+ fn evaluate_tag(&self, tag_name: &str) -> bool {
+ self.0.iter().any(|eval| eval.test(tag_name))
+ }
+
+ /// Check if the tag name is included explicitly in the `GROUP BY` clause.
+ ///
+ /// This is for determining orphan `GROUP BY` tag columns.
+ fn contains_explicit_col_name(&self, col_name: &str) -> bool {
+ self.0.iter().any(|eval| match eval {
+ GroupByEvalType::Tag(t) => t == col_name,
+ _ => false,
+ })
+ }
+}
+
+impl GroupByEvalType {
+ /// Test the given `tag_name` against this evaluator
+ fn test(&self, tag_name: &str) -> bool {
+ match self {
+ Self::Tag(t) => t == tag_name,
+ Self::Regex(regex) => regex.is_match(tag_name),
+ Self::Wildcard => true,
+ }
+ }
+}
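A hedged sketch of how each evaluator variant behaves (assuming the `regex` crate and the types from this diff are in scope):

```rust
let wildcard = GroupByEvalType::Wildcard;
let explicit = GroupByEvalType::Tag("host".to_string());
let pattern = GroupByEvalType::Regex(Regex::new("^t[0-9]+$").expect("valid regex"));

assert!(wildcard.test("anything"));                       // `GROUP BY *` matches every tag
assert!(explicit.test("host") && !explicit.test("cpu"));  // exact name match only
assert!(pattern.test("t1") && !pattern.test("host"));     // regex-based match
```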
+
impl Stream for QueryResponseStream {
type Item = Result<QueryResponse, anyhow::Error>;
@@ -808,6 +1087,7 @@ impl Stream for QueryResponseStream {
// this is why the input stream is fused, because we will end up
// polling the input stream again if we end up here.
Poll::Ready(Some(Ok(self.flush_all())))
+ // Poll::Ready(None)
} else if self.state.is_initialized() {
// we are still in an initialized state, which means no records were buffered
// and therefore we need to emit an empty result set before ending the stream:
diff --git a/influxdb3_server/src/query_executor/mod.rs b/influxdb3_server/src/query_executor/mod.rs
index 493350431e..3d578ff277 100644
--- a/influxdb3_server/src/query_executor/mod.rs
+++ b/influxdb3_server/src/query_executor/mod.rs
@@ -21,10 +21,11 @@ use datafusion_util::MemoryStream;
use influxdb3_cache::distinct_cache::{DistinctCacheFunction, DISTINCT_CACHE_UDTF_NAME};
use influxdb3_cache::last_cache::{LastCacheFunction, LAST_CACHE_UDTF_NAME};
use influxdb3_catalog::catalog::{Catalog, DatabaseSchema};
-use influxdb3_internal_api::query_executor::{QueryExecutor, QueryExecutorError, QueryKind};
+use influxdb3_internal_api::query_executor::{QueryExecutor, QueryExecutorError};
use influxdb3_sys_events::SysEventStore;
use influxdb3_telemetry::store::TelemetryStore;
use influxdb3_write::WriteBuffer;
+use influxdb_influxql_parser::statement::Statement;
use iox_query::exec::{Executor, IOxSessionContext, QueryConfig};
use iox_query::provider::ProviderBuilder;
use iox_query::query_log::QueryLog;
@@ -109,35 +110,66 @@ impl QueryExecutorImpl {
sys_events_store,
}
}
+
+ async fn get_db_namespace(
+ &self,
+ database_name: &str,
+ span_ctx: &Option<SpanContext>,
+ ) -> Result<Arc<dyn QueryNamespace>, QueryExecutorError> {
+ self.namespace(
+ database_name,
+ span_ctx.child_span("get_db_namespace"),
+ false,
+ )
+ .await
+ .map_err(|_| QueryExecutorError::DatabaseNotFound {
+ db_name: database_name.to_string(),
+ })?
+ .ok_or_else(|| QueryExecutorError::DatabaseNotFound {
+ db_name: database_name.to_string(),
+ })
+ }
}
#[async_trait]
impl QueryExecutor for QueryExecutorImpl {
- async fn query(
+ async fn query_sql(
+ &self,
+ database: &str,
+ query: &str,
+ params: Option<StatementParams>,
+ span_ctx: Option<SpanContext>,
+ external_span_ctx: Option<RequestLogContext>,
+ ) -> Result<SendableRecordBatchStream, QueryExecutorError> {
+ info!(%database, %query, ?params, "executing sql query");
+ let db = self.get_db_namespace(database, &span_ctx).await?;
+ query_database_sql(
+ db,
+ query,
+ params,
+ span_ctx,
+ external_span_ctx,
+ Arc::clone(&self.telemetry_store),
+ )
+ .await
+ }
+
+ async fn query_influxql(
&self,
database: &str,
query: &str,
+ influxql_statement: Statement,
params: Option<StatementParams>,
- kind: QueryKind,
span_ctx: Option<SpanContext>,
external_span_ctx: Option<RequestLogContext>,
) -> Result<SendableRecordBatchStream, QueryExecutorError> {
- info!(%database, %query, ?params, ?kind, "QueryExecutorImpl as QueryExecutor::query");
- let db = self
- .namespace(database, span_ctx.child_span("get database"), false)
- .await
- .map_err(|_| QueryExecutorError::DatabaseNotFound {
- db_name: database.to_string(),
- })?
- .ok_or_else(|| QueryExecutorError::DatabaseNotFound {
- db_name: database.to_string(),
- })?;
-
- query_database(
+ info!(database, query, ?params, "executing influxql query");
+ let db = self.get_db_namespace(database, &span_ctx).await?;
+ query_database_influxql(
db,
query,
+ influxql_statement,
params,
- kind,
span_ctx,
external_span_ctx,
Arc::clone(&self.telemetry_store),
@@ -232,11 +264,12 @@ impl QueryExecutor for QueryExecutorImpl {
}
}
-async fn query_database(
+// NOTE: this method is separated out as it is called from a separate query executor
+// implementation in Enterprise
+async fn query_database_sql(
db: Arc<dyn QueryNamespace>,
query: &str,
params: Option<StatementParams>,
- kind: QueryKind,
span_ctx: Option<SpanContext>,
external_span_ctx: Option<RequestLogContext>,
telemetry_store: Arc<TelemetryStore>,
@@ -245,7 +278,7 @@ async fn query_database(
let token = db.record_query(
external_span_ctx.as_ref().map(RequestLogContext::ctx),
- kind.query_type(),
+ "sql",
Box::new(query.to_string()),
params.clone(),
);
@@ -258,12 +291,7 @@ async fn query_database(
// Perform query planning on a separate threadpool than the IO runtime that is servicing
// this request by using `IOxSessionContext::run`.
let plan = ctx
- .run(async move {
- match kind {
- QueryKind::Sql => planner.sql(query, params).await,
- QueryKind::InfluxQl => planner.influxql(query, params).await,
- }
- })
+ .run(async move { planner.sql(query, params).await })
.await;
let plan = match plan.map_err(QueryExecutorError::QueryPlanning) {
@@ -292,6 +320,55 @@ async fn query_database(
}
}
+async fn query_database_influxql(
+ db: Arc<dyn QueryNamespace>,
+ query_str: &str,
+ statement: Statement,
+ params: Option<StatementParams>,
+ span_ctx: Option<SpanContext>,
+ external_span_ctx: Option<RequestLogContext>,
+ telemetry_store: Arc<TelemetryStore>,
+) -> Result<SendableRecordBatchStream, QueryExecutorError> {
+ let params = params.unwrap_or_default();
+ let token = db.record_query(
+ external_span_ctx.as_ref().map(RequestLogContext::ctx),
+ "influxql",
+ Box::new(query_str.to_string()),
+ params.clone(),
+ );
+
+ let ctx = db.new_query_context(span_ctx, Default::default());
+ let planner = Planner::new(&ctx);
+ let plan = ctx
+ .run(async move { planner.influxql(statement, params).await })
+ .await;
+
+ let plan = match plan.map_err(QueryExecutorError::QueryPlanning) {
+ Ok(plan) => plan,
+ Err(e) => {
+ token.fail();
+ return Err(e);
+ }
+ };
+
+ let token = token.planned(&ctx, Arc::clone(&plan));
+
+ let token = token.permit();
+
+ telemetry_store.update_num_queries();
+
+ match ctx.execute_stream(Arc::clone(&plan)).await {
+ Ok(query_results) => {
+ token.success();
+ Ok(query_results)
+ }
+ Err(err) => {
+ token.fail();
+ Err(QueryExecutorError::ExecuteStream(err))
+ }
+ }
+}
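For illustration, a hedged sketch of the new calling convention, in which the HTTP layer is assumed to parse the InfluxQL text once (the parse helper is assumed to be `influxdb_influxql_parser::parse_statements`) and pass the resulting `Statement` down; single-statement case, error handling elided:

```rust
let statement = influxdb_influxql_parser::parse_statements(query)?
    .into_iter()
    .next()
    .expect("exactly one InfluxQL statement");
let stream = query_executor
    .query_influxql(db_name, query, statement, params, span_ctx, external_span_ctx)
    .await?;
```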
+
#[derive(Debug)]
struct RetentionPolicyRow {
database: String,
@@ -682,7 +759,7 @@ mod tests {
parquet_cache::test_cached_obj_store_and_oracle,
};
use influxdb3_catalog::catalog::Catalog;
- use influxdb3_internal_api::query_executor::{QueryExecutor, QueryKind};
+ use influxdb3_internal_api::query_executor::QueryExecutor;
use influxdb3_sys_events::SysEventStore;
use influxdb3_telemetry::store::TelemetryStore;
use influxdb3_wal::{Gen1Duration, WalConfig};
@@ -895,7 +972,7 @@ mod tests {
for t in test_cases {
let batch_stream = query_executor
- .query(db_name, t.query, None, QueryKind::Sql, None, None)
+ .query_sql(db_name, t.query, None, None, None)
.await
.unwrap();
let batches: Vec<RecordBatch> = batch_stream.try_collect().await.unwrap();
diff --git a/influxdb3_server/src/query_planner.rs b/influxdb3_server/src/query_planner.rs
index 1f068fa38c..1f39c88c75 100644
--- a/influxdb3_server/src/query_planner.rs
+++ b/influxdb3_server/src/query_planner.rs
@@ -1,6 +1,15 @@
-use std::sync::Arc;
+use std::{any::Any, sync::Arc};
-use datafusion::{error::DataFusionError, physical_plan::ExecutionPlan};
+use arrow_schema::SchemaRef;
+use datafusion::{
+ error::DataFusionError,
+ execution::{SendableRecordBatchStream, TaskContext},
+ physical_expr::EquivalenceProperties,
+ physical_plan::{
+ DisplayAs, DisplayFormatType, ExecutionPlan, ExecutionPlanProperties, PlanProperties,
+ },
+};
+use influxdb_influxql_parser::statement::Statement;
use iox_query::{exec::IOxSessionContext, frontend::sql::SqlQueryPlanner};
use iox_query_influxql::frontend::planner::InfluxQLQueryPlanner;
use iox_query_params::StatementParams;
@@ -41,12 +50,119 @@ impl Planner {
/// Plan an InfluxQL query and return a DataFusion physical plan
pub(crate) async fn influxql(
&self,
- query: impl AsRef<str> + Send,
+ statement: Statement,
params: impl Into<StatementParams> + Send,
) -> Result<Arc<dyn ExecutionPlan>> {
- let query = query.as_ref();
let ctx = self.ctx.child_ctx("rest_api_query_planner_influxql");
- InfluxQLQueryPlanner::query(query, params, &ctx).await
+ let logical_plan = InfluxQLQueryPlanner::statement_to_plan(statement, params, &ctx).await?;
+ let input = ctx.create_physical_plan(&logical_plan).await?;
+ let input_schema = input.schema();
+ let mut md = input_schema.metadata().clone();
+ md.extend(logical_plan.schema().metadata().clone());
+ let schema = Arc::new(arrow::datatypes::Schema::new_with_metadata(
+ input_schema.fields().clone(),
+ md,
+ ));
+
+ Ok(Arc::new(SchemaExec::new(input, schema)))
+ }
+}
+
+// NOTE: the below code is currently copied from IOx and needs to be made pub so we can
+// re-use it.
+
+/// A physical operator that overrides the `schema` API,
+/// to return an amended version owned by `SchemaExec`. The
+/// principal use case is to add additional metadata to the schema.
+struct SchemaExec {
+ input: Arc<dyn ExecutionPlan>,
+ schema: SchemaRef,
+
+ /// Cache holding plan properties like equivalences, output partitioning, output ordering etc.
+ cache: PlanProperties,
+}
+
+impl SchemaExec {
+ fn new(input: Arc<dyn ExecutionPlan>, schema: SchemaRef) -> Self {
+ let cache = Self::compute_properties(&input, Arc::clone(&schema));
+ Self {
+ input,
+ schema,
+ cache,
+ }
+ }
+
+ /// This function creates the cache object that stores the plan properties such as equivalence properties, partitioning, ordering, etc.
+ fn compute_properties(input: &Arc<dyn ExecutionPlan>, schema: SchemaRef) -> PlanProperties {
+ let eq_properties = match input.properties().output_ordering() {
+ None => EquivalenceProperties::new(schema),
+ Some(output_ordering) => {
+ EquivalenceProperties::new_with_orderings(schema, &[output_ordering.to_vec()])
+ }
+ };
+
+ let output_partitioning = input.output_partitioning().clone();
+
+ PlanProperties::new(eq_properties, output_partitioning, input.execution_mode())
+ }
+}
+
+impl std::fmt::Debug for SchemaExec {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ self.fmt_as(DisplayFormatType::Default, f)
+ }
+}
+
+impl ExecutionPlan for SchemaExec {
+ fn name(&self) -> &str {
+ Self::static_name()
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn schema(&self) -> SchemaRef {
+ Arc::clone(&self.schema)
+ }
+
+ fn properties(&self) -> &PlanProperties {
+ &self.cache
+ }
+
+ fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
+ vec![&self.input]
+ }
+
+ fn with_new_children(
+ self: Arc<Self>,
+ _children: Vec<Arc<dyn ExecutionPlan>>,
+ ) -> Result<Arc<dyn ExecutionPlan>> {
+ unimplemented!()
+ }
+
+ fn execute(
+ &self,
+ partition: usize,
+ context: Arc<TaskContext>,
+ ) -> Result<SendableRecordBatchStream> {
+ self.input.execute(partition, context)
+ }
+
+ fn statistics(&self) -> Result<datafusion::physical_plan::Statistics, DataFusionError> {
+ Ok(datafusion::physical_plan::Statistics::new_unknown(
+ &self.schema(),
+ ))
+ }
+}
+
+impl DisplayAs for SchemaExec {
+ fn fmt_as(&self, t: DisplayFormatType, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match t {
+ DisplayFormatType::Default | DisplayFormatType::Verbose => {
+ write!(f, "SchemaExec")
+ }
+ }
}
}
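As a hedged illustration of why the wrapper exists (assuming `input: Arc<dyn ExecutionPlan>` and a metadata-enriched `merged_schema: SchemaRef` are in scope):

```rust
let exec: Arc<dyn ExecutionPlan> = Arc::new(SchemaExec::new(input, Arc::clone(&merged_schema)));
// Consumers now observe the merged metadata, while execution is delegated
// unchanged to the wrapped plan.
assert_eq!(exec.schema().metadata(), merged_schema.metadata());
```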
|
e4a5d2efaa5638dc8086cacecc8b4d31ff92a016
|
Fraser Savage
|
2023-08-01 14:21:56
|
Expose `num_probes` request count used to health-check ingesters as config option
|
This allows routers to be configured to mark downstreams as healthy/
unhealthy with a requirement for the number of probe requests
which can/must be collected to transition the health checker's circuit
state to healthy/unhealthy.
| null |
feat(router): Expose `num_probes` request count used to health-check ingesters as config option
This allows routers to be configured to mark downstreams as healthy/
unhealthy with a requirement for the number of probe requests
which can/must be collected to transition the health checker's circuit
state to healthy/unhealthy.
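As a hedged illustration (values and endpoint name are placeholders), the new knob becomes the extra constructor argument threaded through in the diff below:

```rust
let client = CircuitBreakingClient::new(
    write_client,            // an ingester RPC write client, assumed in scope
    "ingester-0",            // endpoint name, used for logging context
    Duration::from_secs(5),  // --rpc-write-health-error-window-seconds
    10,                      // --rpc-write-health-num-probes (new here)
);
```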
|
diff --git a/clap_blocks/src/router.rs b/clap_blocks/src/router.rs
index 18633bcd3e..20ea322443 100644
--- a/clap_blocks/src/router.rs
+++ b/clap_blocks/src/router.rs
@@ -142,6 +142,16 @@ pub struct RouterConfig {
value_parser = parse_duration
)]
pub rpc_write_health_error_window_seconds: Duration,
+
+ /// Specify the number of probe requests made within a probing window of
+ /// 1 second to compare against the maximum error ratio of 80% when the
+ /// router is making judgements about the health of downstream RPC write handlers.
+ #[clap(
+ long = "rpc-write-health-num-probes",
+ env = "INFLUXDB_IOX_RPC_WRITE_HEALTH_NUM_PROBES",
+ default_value = "10"
+ )]
+ pub rpc_write_health_num_probes: u64,
}
/// Map a string containing an integer number of seconds into a [`Duration`].
diff --git a/influxdb_iox/src/commands/run/all_in_one.rs b/influxdb_iox/src/commands/run/all_in_one.rs
index f2ac51c7bb..20d46f84fb 100644
--- a/influxdb_iox/src/commands/run/all_in_one.rs
+++ b/influxdb_iox/src/commands/run/all_in_one.rs
@@ -491,6 +491,7 @@ impl Config {
rpc_write_replicas: 1.try_into().unwrap(),
rpc_write_max_outgoing_bytes: ingester_config.rpc_write_max_incoming_bytes,
rpc_write_health_error_window_seconds: Duration::from_secs(5),
+ rpc_write_health_num_probes: 10,
gossip_config: GossipConfig::disabled(),
};
diff --git a/ioxd_router/src/lib.rs b/ioxd_router/src/lib.rs
index 4da5ece49f..2e0df2fef7 100644
--- a/ioxd_router/src/lib.rs
+++ b/ioxd_router/src/lib.rs
@@ -248,6 +248,7 @@ pub async fn create_router_server_type(
router_config.rpc_write_replicas,
&metrics,
router_config.rpc_write_health_error_window_seconds,
+ router_config.rpc_write_health_num_probes,
);
let rpc_writer = InstrumentationDecorator::new("rpc_writer", &metrics, rpc_writer);
diff --git a/router/src/dml_handlers/rpc_write.rs b/router/src/dml_handlers/rpc_write.rs
index 528ad2eb92..e777b60eaf 100644
--- a/router/src/dml_handlers/rpc_write.rs
+++ b/router/src/dml_handlers/rpc_write.rs
@@ -116,7 +116,9 @@ pub struct RpcWrite<T, C = CircuitBreaker> {
impl<T> RpcWrite<T> {
/// Initialise a new [`RpcWrite`] that sends requests to an arbitrary
- /// downstream Ingester, using a round-robin strategy.
+ /// downstream Ingester, using a round-robin strategy. Health checks are
+ /// configured by `error_window` and `num_probes` as laid out by the
+ /// documentation for [`CircuitBreaker`].
///
/// If [`Some`], `replica_copies` specifies the number of additional
/// upstream ingesters that must receive and acknowledge the write for it to
@@ -131,6 +133,7 @@ impl<T> RpcWrite<T> {
n_copies: NonZeroUsize,
metrics: &metric::Registry,
error_window: Duration,
+ num_probes: u64,
) -> Self
where
T: Send + Sync + Debug + 'static,
@@ -138,7 +141,7 @@ impl<T> RpcWrite<T> {
{
let endpoints = Balancer::new(
endpoints.into_iter().map(|(client, name)| {
- CircuitBreakingClient::new(client, name.into(), error_window)
+ CircuitBreakingClient::new(client, name.into(), error_window, num_probes)
}),
Some(metrics),
);
@@ -385,6 +388,7 @@ mod tests {
const NAMESPACE_NAME: &str = "bananas";
const NAMESPACE_ID: NamespaceId = NamespaceId::new(42);
const ARBITRARY_TEST_ERROR_WINDOW: Duration = Duration::from_secs(5);
+ const ARBITRARY_TEST_NUM_PROBES: u64 = 10;
// Start a new `NamespaceSchema` with only the given ID; the rest of the fields are arbitrary.
fn new_empty_namespace_schema() -> Arc<NamespaceSchema> {
@@ -463,6 +467,7 @@ mod tests {
1.try_into().unwrap(),
&metric::Registry::default(),
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
);
// Drive the RPC writer
@@ -526,6 +531,7 @@ mod tests {
1.try_into().unwrap(),
&metric::Registry::default(),
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
);
// Drive the RPC writer
@@ -595,6 +601,7 @@ mod tests {
1.try_into().unwrap(),
&metric::Registry::default(),
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
);
// Drive the RPC writer
@@ -645,10 +652,13 @@ mod tests {
circuit_1.set_healthy(false);
let got = make_request(
- [
- CircuitBreakingClient::new(client_1, "client_1", ARBITRARY_TEST_ERROR_WINDOW)
- .with_circuit_breaker(circuit_1),
- ],
+ [CircuitBreakingClient::new(
+ client_1,
+ "client_1",
+ ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
+ )
+ .with_circuit_breaker(circuit_1)],
1,
)
.await;
@@ -669,10 +679,13 @@ mod tests {
circuit_1.set_healthy(true);
let got = make_request(
- [
- CircuitBreakingClient::new(client_1, "client_1", ARBITRARY_TEST_ERROR_WINDOW)
- .with_circuit_breaker(circuit_1),
- ],
+ [CircuitBreakingClient::new(
+ client_1,
+ "client_1",
+ ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
+ )
+ .with_circuit_breaker(circuit_1)],
1,
)
.await;
@@ -693,10 +706,13 @@ mod tests {
circuit_1.set_healthy(true);
let got = make_request(
- [
- CircuitBreakingClient::new(client_1, "client_1", ARBITRARY_TEST_ERROR_WINDOW)
- .with_circuit_breaker(circuit_1),
- ],
+ [CircuitBreakingClient::new(
+ client_1,
+ "client_1",
+ ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
+ )
+ .with_circuit_breaker(circuit_1)],
1,
)
.await;
@@ -728,10 +744,20 @@ mod tests {
let got = make_request(
[
- CircuitBreakingClient::new(client_1, "client_1", ARBITRARY_TEST_ERROR_WINDOW)
- .with_circuit_breaker(circuit_1),
- CircuitBreakingClient::new(client_2, "client_2", ARBITRARY_TEST_ERROR_WINDOW)
- .with_circuit_breaker(circuit_2),
+ CircuitBreakingClient::new(
+ client_1,
+ "client_1",
+ ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
+ )
+ .with_circuit_breaker(circuit_1),
+ CircuitBreakingClient::new(
+ client_2,
+ "client_2",
+ ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
+ )
+ .with_circuit_breaker(circuit_2),
],
2, // 2 copies required
)
@@ -758,12 +784,14 @@ mod tests {
Arc::clone(&client_1),
"client_1",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(circuit_1),
CircuitBreakingClient::new(
Arc::clone(&client_2),
"client_2",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(circuit_2),
],
@@ -802,12 +830,14 @@ mod tests {
Arc::clone(&client_1),
"client_1",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(circuit_1),
CircuitBreakingClient::new(
Arc::clone(&client_2),
"client_2",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(circuit_2),
];
@@ -856,12 +886,14 @@ mod tests {
Arc::clone(&client_1),
"client_1",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(circuit_1),
CircuitBreakingClient::new(
Arc::clone(&client_2),
"client_2",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(circuit_2),
],
@@ -920,18 +952,21 @@ mod tests {
Arc::clone(&client_1),
"client_1",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(circuit_1),
CircuitBreakingClient::new(
Arc::clone(&client_2),
"client_2",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(circuit_2),
CircuitBreakingClient::new(
Arc::clone(&client_3),
"client_3",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(circuit_3),
];
@@ -1035,7 +1070,11 @@ mod tests {
async move {
let endpoints = upstreams.into_iter()
.map(|(circuit, client)| {
- CircuitBreakingClient::new(client, "bananas",ARBITRARY_TEST_ERROR_WINDOW)
+ CircuitBreakingClient::new(client, "bananas",ARBITRARY_TEST_ERROR_WINDOW,
+
+
+ ARBITRARY_TEST_NUM_PROBES,
+ )
.with_circuit_breaker(circuit)
});
diff --git a/router/src/dml_handlers/rpc_write/balancer.rs b/router/src/dml_handlers/rpc_write/balancer.rs
index 235ae6d7be..6bc0143e92 100644
--- a/router/src/dml_handlers/rpc_write/balancer.rs
+++ b/router/src/dml_handlers/rpc_write/balancer.rs
@@ -220,6 +220,7 @@ mod tests {
use super::*;
const ARBITRARY_TEST_ERROR_WINDOW: Duration = Duration::from_secs(5);
+ const ARBITRARY_TEST_NUM_PROBES: u64 = 10;
/// No healthy nodes prevents a snapshot from being returned.
#[tokio::test]
@@ -233,6 +234,7 @@ mod tests {
Arc::new(MockWriteClient::default()),
"bananas",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(Arc::clone(&circuit_err_1));
@@ -243,6 +245,7 @@ mod tests {
Arc::new(MockWriteClient::default()),
"bananas",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(Arc::clone(&circuit_err_2));
@@ -266,6 +269,7 @@ mod tests {
Arc::new(MockWriteClient::default()),
"bananas",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(Arc::clone(&circuit_err_1));
@@ -276,6 +280,7 @@ mod tests {
Arc::new(MockWriteClient::default()),
"bananas",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(Arc::clone(&circuit_err_2));
let circuit_ok = Arc::new(MockCircuitBreaker::default());
@@ -285,6 +290,7 @@ mod tests {
Arc::new(MockWriteClient::default()),
"bananas",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(Arc::clone(&circuit_ok));
@@ -345,6 +351,7 @@ mod tests {
Arc::new(MockWriteClient::default()),
"bananas",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(Arc::clone(&circuit_err_1));
@@ -355,6 +362,7 @@ mod tests {
Arc::new(MockWriteClient::default()),
"bananas",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(Arc::clone(&circuit_err_2));
@@ -364,6 +372,7 @@ mod tests {
Arc::new(MockWriteClient::default()),
"bananas",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(Arc::clone(&circuit_ok));
@@ -415,6 +424,7 @@ mod tests {
Arc::new(MockWriteClient::default()),
"bananas",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(Arc::clone(&circuit));
@@ -468,6 +478,7 @@ mod tests {
Arc::new(MockWriteClient::default()),
"bananas",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(Arc::clone(&circuit_err));
@@ -477,6 +488,7 @@ mod tests {
Arc::new(MockWriteClient::default()),
"bananas",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(Arc::clone(&circuit_ok_1));
@@ -486,6 +498,7 @@ mod tests {
Arc::new(MockWriteClient::default()),
"bananas",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(Arc::clone(&circuit_ok_2));
@@ -523,6 +536,7 @@ mod tests {
Arc::new(MockWriteClient::default()),
"bad-client-1",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(Arc::clone(&circuit_err_1));
@@ -533,6 +547,7 @@ mod tests {
Arc::new(MockWriteClient::default()),
"bad-client-2",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(Arc::clone(&circuit_err_2));
@@ -542,6 +557,7 @@ mod tests {
Arc::new(MockWriteClient::default()),
"ok-client",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(Arc::clone(&circuit_ok));
@@ -612,6 +628,7 @@ mod tests {
Arc::new(MockWriteClient::default()),
"bad-client-1",
ARBITRARY_TEST_ERROR_WINDOW,
+ ARBITRARY_TEST_NUM_PROBES,
)
.with_circuit_breaker(Arc::clone(&circuit_err));
diff --git a/router/src/dml_handlers/rpc_write/circuit_breaker.rs b/router/src/dml_handlers/rpc_write/circuit_breaker.rs
index 40ea8fc03e..47be61e6eb 100644
--- a/router/src/dml_handlers/rpc_write/circuit_breaker.rs
+++ b/router/src/dml_handlers/rpc_write/circuit_breaker.rs
@@ -20,10 +20,8 @@ use tokio::{
/// healthy. If updating this value, remember to update the documentation
/// in the CLI flag for the configurable error window.
const MAX_ERROR_RATIO: f32 = 0.8;
-/// The maximum number of probe requests to allow when in an unhealthy state.
-const NUM_PROBES: u64 = 10;
-/// The length of time during which up to [`NUM_PROBES`] are allowed when in an
-/// unhealthy state.
+/// The length of time during which up to the configured number of probes
+/// are allowed when in an unhealthy state.
const PROBE_INTERVAL: Duration = Duration::from_secs(1);
/// A low-overhead, error ratio gated [`CircuitBreaker`].
@@ -41,7 +39,7 @@ const PROBE_INTERVAL: Duration = Duration::from_secs(1);
/// to detect recovery (with an expectation this will likely fail). The
/// [`CircuitBreaker`] selects callers to send a probe request, indicated by a
/// `true` return value from [`CircuitBreaker::should_probe()`]. At least
-/// [`NUM_PROBES`] must be sent to drive a [`CircuitBreaker`] to a healthy
+/// the configured number of probes must be sent to drive a [`CircuitBreaker`] to a healthy
/// state, so callers must call [`CircuitBreaker::should_probe()`] periodically.
///
/// When requests made to the ingester return, [`CircuitBreaker::observe()`] is
@@ -63,7 +61,7 @@ const PROBE_INTERVAL: Duration = Duration::from_secs(1);
/// The circuit breaker is considered unhealthy when 80% ([`MAX_ERROR_RATIO`])
/// of requests within the configured error window fail. The breaker
/// becomes healthy again when the error rate falls below 80%
-/// ([`MAX_ERROR_RATIO`]) for the, at most, 10 probe requests ([`NUM_PROBES`])
+/// ([`MAX_ERROR_RATIO`]) for the, at most, configured number of probe requests
/// allowed through within 1 second ([`PROBE_INTERVAL`]).
///
/// The circuit breaker initialises in the healthy state.
@@ -90,7 +88,7 @@ const PROBE_INTERVAL: Duration = Duration::from_secs(1);
///
/// * Half-open: A transition state between "open/unhealthy" and
/// "closed/healthy"; a majority of traffic is refused, but up to
-/// [`NUM_PROBES`] number of requests are allowed to proceed per
+/// the configured number of probe requests are allowed to proceed per
/// [`PROBE_INTERVAL`]. Once the probes are sent, the error ratio is
/// evaluated, and the system returns to either open or closed as
/// appropriate.
@@ -103,7 +101,7 @@ const PROBE_INTERVAL: Duration = Duration::from_secs(1);
/// [`MAX_ERROR_RATIO`] within a single window to open the circuit breaker to
/// start being considered unhealthy.
///
-/// A floor of at least [`MAX_ERROR_RATIO`] * [`NUM_PROBES`] must be observed per
+/// A floor of at least [`MAX_ERROR_RATIO`] * `configured num_probes` must be observed per
/// error window before the circuit breaker opens / becomes unhealthy.
///
/// Error ratios are measured on every call to [`CircuitBreaker::is_healthy`],
@@ -118,7 +116,7 @@ const PROBE_INTERVAL: Duration = Duration::from_secs(1);
///
/// ## Probing / Closing (becoming healthy)
///
-/// Once a circuit breaker transitions to "open/unhealthy", up to 10 [`NUM_PROBES`]
+/// Once a circuit breaker transitions to "open/unhealthy", up to the configured number of probe
/// requests are allowed per 1s [`PROBE_INTERVAL`], as determined by calling
/// [`CircuitBreaker::should_probe`] before sending a request. This is referred
/// to as "probing", allowing the client to discover the state of the
@@ -126,17 +124,17 @@ const PROBE_INTERVAL: Duration = Duration::from_secs(1);
/// may fail as a result.
///
/// Whilst in the probing state, the result of each allowed probing request is
-/// recorded - once at least [`NUM_PROBES`] requests have been completed and the
+/// recorded - once at least the configured number of probe requests have been completed and the
/// ratio of errors drops below [`MAX_ERROR_RATIO`], the circuit breaker
/// transitions to "closed/healthy".
///
-/// If [`NUM_PROBES`] requests have been completed and the ratio of errors to
+/// If the configured number of probe requests have been completed and the ratio of errors to
/// successes continues to be above [`MAX_ERROR_RATIO`], transition back to
/// "open/unhealthy" to wait another [`PROBE_INTERVAL`] delay before
-/// transitioning back to the probing state and allowing another [`NUM_PROBES`]
+/// transitioning back to the probing state and allowing another set of probe
/// requests to proceed.
///
-/// If there are not enough requests made to exceed [`NUM_PROBES`] within a
+/// If there are not enough requests made to exceed the configured number of probes within a
/// period of [`PROBE_INTERVAL`], all requests are probes and are allowed
/// through.
#[derive(Debug)]
@@ -145,7 +143,7 @@ pub struct CircuitBreaker {
/// current error window.
///
/// When the total number of requests ([`RequestCounterValue::total()`]) is
- /// less than [`NUM_PROBES`], the circuit is in the "probing" regime. When
+ /// less than the configured number of probes, the circuit is in the "probing" regime. When
/// the number of requests is greater than this amount, the circuit
/// open/closed state depends on the current error ratio.
requests: Arc<RequestCounter>,
@@ -161,13 +159,15 @@ pub struct CircuitBreaker {
///
/// Used for logging context only.
endpoint: Arc<str>,
+
+ num_probes: u64,
}
#[derive(Debug, Default)]
struct ProbeState {
/// The instant at which this set of probes started to be sent.
///
- /// Up to [`NUM_PROBES`] SHOULD be sent in the time range between this
+ /// Up to the configured probe limit SHOULD be sent in the time range between this
/// timestamp plus [`PROBE_INTERVAL`].
probe_window_started_at: Option<Instant>,
/// The number of probes sent so far in this [`PROBE_INTERVAL`].
@@ -175,7 +175,11 @@ struct ProbeState {
}
impl CircuitBreaker {
- pub(crate) fn new(endpoint: impl Into<Arc<str>>, error_window: Duration) -> Self {
+ pub(crate) fn new(
+ endpoint: impl Into<Arc<str>>,
+ error_window: Duration,
+ num_probes: u64,
+ ) -> Self {
let requests = Arc::new(RequestCounter::default());
let s = Self {
requests: Arc::clone(&requests),
@@ -185,10 +189,11 @@ impl CircuitBreaker {
ticker.set_missed_tick_behavior(MissedTickBehavior::Delay);
loop {
ticker.tick().await;
- reset_closed_state_counters(&requests);
+ reset_closed_state_counters(&requests, num_probes);
}
}),
endpoint: endpoint.into(),
+ num_probes,
};
s.set_healthy();
s
@@ -196,7 +201,7 @@ impl CircuitBreaker {
/// Force-set the state of the circuit breaker to "closed" / healthy.
pub(crate) fn set_healthy(&self) {
- self.requests.set(NUM_PROBES as u32, 0);
+ self.requests.set(self.num_probes as u32, 0);
}
/// Observe a request result, recording the success/error.
@@ -223,7 +228,7 @@ impl CircuitBreaker {
// If the counts have previously transitioned to being in the probing
// state, the circuit breaker can't be healthy, and we don't need to
// check the error ratio.
- if is_probing(counts) {
+ if is_probing(counts, self.num_probes) {
return false;
}
@@ -238,7 +243,7 @@ impl CircuitBreaker {
/// "closed/healthy" state; callers should check `is_healthy` first and only
/// call this if `is_healthy` returns `false`.
///
- /// This method will return `true` at most [`NUM_PROBES`] per
+ /// This method will return `true` at most `num_probes` per
/// [`PROBE_INTERVAL`] discrete duration.
///
/// # Blocking
@@ -250,7 +255,7 @@ impl CircuitBreaker {
/// Concurrent requests that were started before (or after) `self` switches
/// to the probing regime without observing [`Self::should_probe()`] as
/// `true` count as probes when evaluating the state of the circuit. These
- /// requests are in addition to the [`NUM_PROBES`] guaranteed to observe
+ /// requests are in addition to the configured number of probes guaranteed to observe
/// `true` when calling this method.
pub(crate) fn should_probe(&self) -> bool {
// Enable this code path only when probing needs to start, or has
@@ -273,7 +278,7 @@ impl CircuitBreaker {
// has been reset because of the `return false` in the next
// match arm that prevents the increase of
// `guard.probes_started` if it has reached `NUM_PROBES`.
- assert!(guard.probes_started <= NUM_PROBES);
+ assert!(guard.probes_started <= self.num_probes);
// Record the start of a probing interval.
guard.probe_window_started_at = Some(now);
// Reset the number of probes allowed.
@@ -292,7 +297,7 @@ impl CircuitBreaker {
//
// If there have already been the configured number of probes,
// do not allow more.
- if guard.probes_started >= NUM_PROBES {
+ if guard.probes_started >= self.num_probes {
debug!(
endpoint=%self.endpoint,
"probes exhausted"
@@ -316,7 +321,7 @@ impl CircuitBreaker {
debug!(
nth_probe = guard.probes_started,
- max_probes = NUM_PROBES,
+ max_probes = self.num_probes,
endpoint=%self.endpoint,
"sending probe"
);
@@ -333,12 +338,12 @@ impl Drop for CircuitBreaker {
// Returns `true` if the circuit is currently in the "probe" state.
#[inline]
-fn is_probing(counts: RequestCounterValue) -> bool {
+fn is_probing(counts: RequestCounterValue, num_probes: u64) -> bool {
// When there are less than `NUM_PROBES` completed requests, the circuit is
// not closed/healthy, as some previous call to `should_probe` has observed
// an error rate to put the circuit into the probing state, which resets the
// request counts to start at 0.
- counts.total() < NUM_PROBES
+ counts.total() < num_probes
}
/// Return `true` if the current ratio of errors is below MAX_ERROR_RATIO,
@@ -363,17 +368,17 @@ fn is_healthy(counts: RequestCounterValue) -> bool {
/// Resets the absolute request counter values if the current circuit state is
/// "closed" (healthy, not probing) at the time of the call, such that the there
-/// must be [`NUM_PROBES`] * [`MAX_ERROR_RATIO`] number of failed requests to open the
+/// must be `num_probes` * [`MAX_ERROR_RATIO`] number of failed requests to open the
/// circuit (mark as unhealthy).
///
/// Retains the closed/healthy state of the circuit. This is NOT an atomic
/// operation.
-fn reset_closed_state_counters(counters: &RequestCounter) {
+fn reset_closed_state_counters(counters: &RequestCounter, num_probes: u64) {
let counts = counters.read();
- if !is_healthy(counts) || is_probing(counts) {
+ if !is_healthy(counts) || is_probing(counts, num_probes) {
return;
}
- counters.set(NUM_PROBES as u32, 0);
+ counters.set(num_probes as u32, 0);
}
/// A store of two `u32` that can be read atomically; one tracking successful
@@ -449,9 +454,9 @@ mod tests {
/// Assert that calling [`reset_closed_state_counters`] does nothing.
#[track_caller]
- fn assert_reset_is_nop(counters: &RequestCounter) {
+ fn assert_reset_is_nop(counters: &RequestCounter, num_probes: u64) {
let v = counters.read();
- reset_closed_state_counters(counters);
+ reset_closed_state_counters(counters, num_probes);
assert_eq!(v, counters.read());
}
@@ -463,7 +468,7 @@ mod tests {
/// Return a new [`CircuitBreaker`] with the reset ticker disabled.
fn new_no_reset() -> CircuitBreaker {
- let c = CircuitBreaker::new("bananas", Duration::from_secs(5));
+ let c = CircuitBreaker::new("bananas", Duration::from_secs(5), 10);
c.reset_task.abort();
c
}
@@ -478,7 +483,7 @@ mod tests {
// The circuit breaker starts in a healthy state.
assert!(c.is_healthy());
assert!(!c.should_probe());
- assert_reset_is_nop(&c.requests);
+ assert_reset_is_nop(&c.requests, c.num_probes);
// Observe enough errors to become unhealthy
let n = errors_to_unhealthy(&c.requests);
@@ -490,12 +495,12 @@ mod tests {
// It should then transition to the probe state, and allow the
// configured amount of probe requests.
- for _ in 0..NUM_PROBES {
+ for _ in 0..c.num_probes {
assert!(!c.is_healthy());
assert!(c.should_probe());
// Counter resets should not be allowed when the circuit is not
// healthy.
- assert_reset_is_nop(&c.requests);
+ assert_reset_is_nop(&c.requests, c.num_probes);
}
// And once NUM_PROBES is reached, stop allowing more probes.
@@ -503,7 +508,7 @@ mod tests {
// It should remain unhealthy during this time.
assert!(!c.should_probe());
assert!(!c.is_healthy());
- assert_reset_is_nop(&c.requests);
+ assert_reset_is_nop(&c.requests, c.num_probes);
// Pretend it is time to probe again.
c.probes.lock().probe_window_started_at = Some(
@@ -512,13 +517,13 @@ mod tests {
.expect("instant cannot roll back far enough - test issue, not code issue"),
);
- for _ in 0..(NUM_PROBES - 1) {
+ for _ in 0..(c.num_probes - 1) {
// Recording a successful probe request should not mark the circuit
// as healthy until the NUM_PROBES has been observed.
assert!(c.should_probe());
assert!(!c.is_healthy());
c.requests.observe::<(), ()>(&Ok(()));
- assert_reset_is_nop(&c.requests);
+ assert_reset_is_nop(&c.requests, c.num_probes);
}
// Upon reaching NUM_PROBES of entirely successful requests, the circuit
@@ -553,27 +558,27 @@ mod tests {
//
// Observing half of them as failing should end probing until the next
// probe period.
- let n_failed = NUM_PROBES / 2;
+ let n_failed = c.num_probes / 2;
for _ in 0..(n_failed) {
assert!(!c.is_healthy());
assert!(c.should_probe());
- assert_reset_is_nop(&c.requests);
+ assert_reset_is_nop(&c.requests, c.num_probes);
c.requests.observe::<(), ()>(&Ok(()));
}
- for _ in 0..(NUM_PROBES - n_failed) {
+ for _ in 0..(c.num_probes - n_failed) {
assert!(!c.is_healthy());
assert!(c.should_probe());
- assert_reset_is_nop(&c.requests);
+ assert_reset_is_nop(&c.requests, c.num_probes);
c.requests.observe::<(), ()>(&Err(()));
}
- assert_eq!(c.requests.read().total(), NUM_PROBES);
+ assert_eq!(c.requests.read().total(), c.num_probes);
// The probes did not drive the circuit breaker to closed/healthy.
assert!(!c.is_healthy());
// And no more probes should be allowed.
assert!(!c.should_probe());
// The request counters should not reset when unhealthy.
- assert_reset_is_nop(&c.requests);
+ assert_reset_is_nop(&c.requests, c.num_probes);
// Pretend it is time to probe again.
c.probes.lock().probe_window_started_at = Some(
@@ -583,10 +588,10 @@ mod tests {
);
// Do the probe requests, all succeeding.
- for _ in 0..NUM_PROBES {
+ for _ in 0..c.num_probes {
assert!(!c.is_healthy());
assert!(c.should_probe());
- assert_reset_is_nop(&c.requests);
+ assert_reset_is_nop(&c.requests, c.num_probes);
c.requests.observe::<(), ()>(&Ok(()));
}
@@ -602,11 +607,11 @@ mod tests {
/// error window periods from changing the circuit to open/unhealthy.
#[tokio::test]
async fn test_periodic_counter_reset() {
- let c = CircuitBreaker::new("bananas", Duration::from_secs(5));
+ let c = CircuitBreaker::new("bananas", Duration::from_secs(5), 10);
// Assert the circuit breaker as healthy.
assert!(c.is_healthy());
- assert_reset_is_nop(&c.requests);
+ assert_reset_is_nop(&c.requests, c.num_probes);
// Calculate how many errors are needed to mark the circuit breaker as
// unhealthy.
@@ -622,7 +627,7 @@ mod tests {
// Reset the counters for the new error observation window
let v = c.requests.read();
- reset_closed_state_counters(&c.requests);
+ reset_closed_state_counters(&c.requests, c.num_probes);
assert_ne!(v, c.requests.read());
assert!(c.is_healthy());
@@ -668,7 +673,7 @@ mod tests {
// the error window advances and the counters are
// reset.
if (random::<usize>() % 50) == 0 {
- reset_closed_state_counters(&c.requests);
+ reset_closed_state_counters(&c.requests, c.num_probes);
}
}
}
@@ -690,7 +695,7 @@ mod tests {
);
// Drive successful probes if needed.
- for _ in 0..NUM_PROBES {
+ for _ in 0..c.num_probes {
if c.should_probe() {
c.requests.observe::<(), ()>(&Ok(()));
}
@@ -701,4 +706,34 @@ mod tests {
// successfully probed and driven healthy afterwards.
assert!(c.is_healthy(), "{c:?}");
}
+
+ /// Assert that when configured for low write volumes, a single successful
+ /// request is sufficient to drive the state to "healthy".
+ #[tokio::test]
+ async fn test_low_volume_configuration() {
+ let c = CircuitBreaker::new("bananas", Duration::from_secs(5), 1);
+ c.reset_task.abort();
+
+ // Assert the circuit breaker as healthy.
+ assert!(c.is_healthy());
+ assert_reset_is_nop(&c.requests, c.num_probes);
+
+ // Calculate how many errors are needed to mark the circuit breaker as
+ // unhealthy.
+ let n = errors_to_unhealthy(&c.requests);
+
+ // Drive the circuit to unhealthy.
+ for _ in 0..n {
+ assert!(c.is_healthy());
+ c.requests.observe::<(), ()>(&Err(()));
+ }
+ assert!(!c.is_healthy());
+
+ // Observe a single successful request
+ assert!(c.should_probe());
+ c.requests.observe::<(), ()>(&Ok(()));
+
+ // And the circuit should now be healthy.
+ assert!(c.is_healthy());
+ }
}
diff --git a/router/src/dml_handlers/rpc_write/circuit_breaking_client.rs b/router/src/dml_handlers/rpc_write/circuit_breaking_client.rs
index 0efd42a96c..b1aff72408 100644
--- a/router/src/dml_handlers/rpc_write/circuit_breaking_client.rs
+++ b/router/src/dml_handlers/rpc_write/circuit_breaking_client.rs
@@ -55,9 +55,14 @@ impl<T> CircuitBreakingClient<T> {
inner: T,
endpoint_name: impl Into<Arc<str>>,
error_window: Duration,
+ num_probes_per_client: u64,
) -> Self {
let endpoint_name = endpoint_name.into();
- let state = CircuitBreaker::new(Arc::clone(&endpoint_name), error_window);
+ let state = CircuitBreaker::new(
+ Arc::clone(&endpoint_name),
+ error_window,
+ num_probes_per_client,
+ );
state.set_healthy();
Self {
inner,
@@ -189,6 +194,7 @@ mod tests {
MockWriteClient::default(),
"bananas",
Duration::from_secs(5),
+ 10,
)
.with_circuit_breaker(Arc::clone(&circuit_breaker));
@@ -221,9 +227,13 @@ mod tests {
.into_iter(),
)),
);
- let wrapper =
- CircuitBreakingClient::new(Arc::clone(&mock_client), "bananas", Duration::from_secs(5))
- .with_circuit_breaker(Arc::clone(&circuit_breaker));
+ let wrapper = CircuitBreakingClient::new(
+ Arc::clone(&mock_client),
+ "bananas",
+ Duration::from_secs(5),
+ 10,
+ )
+ .with_circuit_breaker(Arc::clone(&circuit_breaker));
assert_eq!(circuit_breaker.ok_count(), 0);
assert_eq!(circuit_breaker.err_count(), 0);
diff --git a/router/tests/common/mod.rs b/router/tests/common/mod.rs
index 6550b3b58e..049fcd493e 100644
--- a/router/tests/common/mod.rs
+++ b/router/tests/common/mod.rs
@@ -35,6 +35,7 @@ pub struct TestContextBuilder {
namespace_autocreation: MissingNamespaceAction,
single_tenancy: bool,
rpc_write_error_window: Duration,
+ rpc_write_num_probes: u64,
}
impl Default for TestContextBuilder {
@@ -43,6 +44,7 @@ impl Default for TestContextBuilder {
namespace_autocreation: MissingNamespaceAction::Reject,
single_tenancy: false,
rpc_write_error_window: Duration::from_secs(5),
+ rpc_write_num_probes: 10,
}
}
}
@@ -82,6 +84,7 @@ impl TestContextBuilder {
catalog,
metrics,
self.rpc_write_error_window,
+ self.rpc_write_num_probes,
)
.await
}
@@ -98,6 +101,7 @@ pub struct TestContext {
namespace_autocreation: MissingNamespaceAction,
single_tenancy: bool,
rpc_write_error_window: Duration,
+ rpc_write_num_probes: u64,
}
// This mass of words is certainly a downside of chained handlers.
@@ -138,6 +142,7 @@ impl TestContext {
catalog: Arc<dyn Catalog>,
metrics: Arc<metric::Registry>,
rpc_write_error_window: Duration,
+ rpc_write_num_probes: u64,
) -> Self {
let client = Arc::new(MockWriteClient::default());
let rpc_writer = RpcWrite::new(
@@ -145,6 +150,7 @@ impl TestContext {
1.try_into().unwrap(),
&metrics,
rpc_write_error_window,
+ rpc_write_num_probes,
);
let ns_cache = Arc::new(ReadThroughCache::new(
@@ -202,6 +208,7 @@ impl TestContext {
namespace_autocreation,
single_tenancy,
rpc_write_error_window,
+ rpc_write_num_probes,
}
}
@@ -216,6 +223,7 @@ impl TestContext {
catalog,
metrics,
self.rpc_write_error_window,
+ self.rpc_write_num_probes,
)
.await
}
|
ec0d1375d46264878c31315cb9b95054384fb173
|
Dom Dwyer
|
2023-05-23 11:50:32
|
put timestamps in retention error
|
Include the minimum acceptable timestamp (the retention bound) and the
observed timestamp that exceeds this bound in the retention enforcement
write error response.
| null |
feat(router): put timestamps in retention error
Include the minimum acceptable timestamp (the retention bound) and the
observed timestamp that exceeds this bound in the retention enforcement
write error response.
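A hedged sketch of what the enriched error now carries (variant and field names come from the diff below; the table name and timestamp values are placeholders):

```rust
let err = RetentionError::OutsideRetention {
    table_name: "weather".to_string(),
    min_acceptable_ts: iox_time::Time::from_timestamp_nanos(1_684_800_000_000_000_000),
    observed_ts: iox_time::Time::from_timestamp_nanos(1_600_000_000_000_000_000),
};
// Rendered via Display, the message names the table and both timestamps;
// the HTTP layer keeps mapping it to 403 FORBIDDEN.
```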
|
diff --git a/router/src/dml_handlers/retention_validation.rs b/router/src/dml_handlers/retention_validation.rs
index 8946e6dd7a..eb612e2ae5 100644
--- a/router/src/dml_handlers/retention_validation.rs
+++ b/router/src/dml_handlers/retention_validation.rs
@@ -14,8 +14,20 @@ use super::DmlHandler;
#[derive(Debug, Error)]
pub enum RetentionError {
/// Time is outside the retention period.
- #[error("data in table {0} is outside of the retention period")]
- OutsideRetention(String),
+ #[error(
+ "data in table {table_name} is outside of the retention period: minimum \
+ acceptable timestamp is {min_acceptable_ts}, but observed timestamp \
+ {observed_ts} is older."
+ )]
+ OutsideRetention {
+ /// The minimum row timestamp that will be considered within the
+ /// retention period.
+ min_acceptable_ts: iox_time::Time,
+ /// The timestamp in the write that exceeds the retention minimum.
+ observed_ts: iox_time::Time,
+ /// The table name in which the observed timestamp was found.
+ table_name: String,
+ },
}
/// A [`DmlHandler`] implementation that validates that the write is within the
@@ -58,7 +70,11 @@ impl DmlHandler for RetentionValidator {
for (table_name, batch) in &batch {
if let Some(min) = batch.timestamp_summary().and_then(|v| v.stats.min) {
if min < min_retention {
- return Err(RetentionError::OutsideRetention(table_name.clone()));
+ return Err(RetentionError::OutsideRetention {
+ table_name: table_name.clone(),
+ min_acceptable_ts: iox_time::Time::from_timestamp_nanos(min_retention),
+ observed_ts: iox_time::Time::from_timestamp_nanos(min),
+ });
}
}
}
diff --git a/router/src/server/http.rs b/router/src/server/http.rs
index d5cd70bcb3..1920611b8d 100644
--- a/router/src/server/http.rs
+++ b/router/src/server/http.rs
@@ -149,7 +149,7 @@ impl From<&DmlError> for StatusCode {
DmlError::Internal(_) => StatusCode::INTERNAL_SERVER_ERROR,
DmlError::Partition(PartitionError::BatchWrite(_)) => StatusCode::INTERNAL_SERVER_ERROR,
- DmlError::Retention(RetentionError::OutsideRetention(_)) => StatusCode::FORBIDDEN,
+ DmlError::Retention(RetentionError::OutsideRetention { .. }) => StatusCode::FORBIDDEN,
DmlError::RpcWrite(RpcWriteError::Client(RpcWriteClientError::Upstream(_))) => {
StatusCode::INTERNAL_SERVER_ERROR
}
diff --git a/router/tests/grpc.rs b/router/tests/grpc.rs
index fd940bae2d..ab26989bfd 100644
--- a/router/tests/grpc.rs
+++ b/router/tests/grpc.rs
@@ -482,9 +482,10 @@ async fn test_update_namespace_0_retention_period() {
assert_matches!(
err,
router::server::http::Error::DmlHandler(DmlError::Retention(
- RetentionError::OutsideRetention(name)
+ RetentionError::OutsideRetention{table_name, min_acceptable_ts, observed_ts}
)) => {
- assert_eq!(name, "platanos");
+ assert_eq!(table_name, "platanos");
+ assert!(observed_ts < min_acceptable_ts);
}
);
diff --git a/router/tests/http.rs b/router/tests/http.rs
index 5b5cb65dcd..d284f0fe00 100644
--- a/router/tests/http.rs
+++ b/router/tests/http.rs
@@ -110,9 +110,10 @@ async fn test_write_outside_retention_period() {
&response,
router::server::http::Error::DmlHandler(
DmlError::Retention(
- RetentionError::OutsideRetention(e))
+ RetentionError::OutsideRetention{table_name, min_acceptable_ts, observed_ts})
) => {
- assert_eq!(e, "apple");
+ assert_eq!(table_name, "apple");
+ assert!(observed_ts < min_acceptable_ts);
}
);
assert_eq!(response.as_status_code(), StatusCode::FORBIDDEN);
|
efb964c390d037d9ba03038fffd8a2235827619c
|
Carol (Nichols || Goulding)
|
2022-10-14 07:34:17
|
Enforce table column limits from the schema cache (#5819)
|
* fix: Avoid some allocations by collecting instead of inserting into a vec
* refactor: Encode that adding columns is for one table at a time
* test: Add another test of column limits
* test: Add below/above limit tests for create_or_get_many
* fix: Explicitly DO NOT check column limits when inserting many columns
* feat: Cache the max_columns_per_table on the NamespaceSchema
* feat: Add a function to validate column limits in-memory
* fix: Provide more useful information when over column limits
* fix: Swap types to remove intermediate allocation
* docs: Explain the interactions of the cache and the column limits
* test: Actually set up test that showcases column limit race condition
* fix: Allow writing to existing columns even if table is over column limit
|
Co-authored-by: Dom <[email protected]>
|
feat: Enforce table column limits from the schema cache (#5819)
* fix: Avoid some allocations by collecting instead of inserting into a vec
* refactor: Encode that adding columns is for one table at a time
* test: Add another test of column limits
* test: Add below/above limit tests for create_or_get_many
* fix: Explicitly DO NOT check column limits when inserting many columns
* feat: Cache the max_columns_per_table on the NamespaceSchema
* feat: Add a function to validate column limits in-memory
* fix: Provide more useful information when over column limits
* fix: Swap types to remove intermediate allocation
* docs: Explain the interactions of the cache and the column limits
* test: Actually set up test that showcases column limit race condition
* fix: Allow writing to existing columns even if table is over column limit
Co-authored-by: Dom <[email protected]>
|
diff --git a/Cargo.lock b/Cargo.lock
index 62fac7061b..2fdf98b444 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4447,6 +4447,7 @@ dependencies = [
"hashbrown",
"hyper",
"iox_catalog",
+ "iox_tests",
"iox_time",
"metric",
"mutable_batch",
diff --git a/data_types/src/lib.rs b/data_types/src/lib.rs
index 48a4b8842c..8d0d36517a 100644
--- a/data_types/src/lib.rs
+++ b/data_types/src/lib.rs
@@ -25,7 +25,7 @@ use snafu::{ResultExt, Snafu};
use sqlx::postgres::PgHasArrayType;
use std::{
borrow::{Borrow, Cow},
- collections::{BTreeMap, HashMap},
+ collections::{BTreeMap, BTreeSet, HashMap},
convert::TryFrom,
fmt::{Display, Write},
mem::{self, size_of_val},
@@ -464,16 +464,24 @@ pub struct NamespaceSchema {
pub query_pool_id: QueryPoolId,
/// the tables in the namespace by name
pub tables: BTreeMap<String, TableSchema>,
+ /// the number of columns per table this namespace allows
+ pub max_columns_per_table: usize,
}
impl NamespaceSchema {
/// Create a new `NamespaceSchema`
- pub fn new(id: NamespaceId, topic_id: TopicId, query_pool_id: QueryPoolId) -> Self {
+ pub fn new(
+ id: NamespaceId,
+ topic_id: TopicId,
+ query_pool_id: QueryPoolId,
+ max_columns_per_table: i32,
+ ) -> Self {
Self {
id,
tables: BTreeMap::new(),
topic_id,
query_pool_id,
+ max_columns_per_table: max_columns_per_table as usize,
}
}
@@ -547,6 +555,12 @@ impl TableSchema {
.map(|(name, c)| (c.id, name.as_str()))
.collect()
}
+
+ /// Return the set of column names for this table. Used in combination with a write operation's
+ /// column names to determine whether a write would exceed the max allowed columns.
+ pub fn column_names(&self) -> BTreeSet<&str> {
+ self.columns.keys().map(|name| name.as_str()).collect()
+ }
}
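A hedged sketch of the in-memory limit check this enables (not the commit's actual helper; `table_schema`, `namespace_schema`, and `write_columns: &[String]` are assumed in scope):

```rust
let existing = table_schema.column_names();
let new_columns = write_columns
    .iter()
    .filter(|name| !existing.contains(name.as_str()))
    .count();
if new_columns > 0 && existing.len() + new_columns > namespace_schema.max_columns_per_table {
    // Reject the write before touching the catalog. Writes that only touch
    // existing columns are left alone, even if the table is already over the cap.
}
```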
/// Data object for a column
@@ -3367,12 +3381,14 @@ mod tests {
topic_id: TopicId::new(2),
query_pool_id: QueryPoolId::new(3),
tables: BTreeMap::from([]),
+ max_columns_per_table: 4,
};
let schema2 = NamespaceSchema {
id: NamespaceId::new(1),
topic_id: TopicId::new(2),
query_pool_id: QueryPoolId::new(3),
tables: BTreeMap::from([(String::from("foo"), TableSchema::new(TableId::new(1)))]),
+ max_columns_per_table: 4,
};
assert!(schema1.size() < schema2.size());
}
diff --git a/import/src/aggregate_tsm_schema/update_catalog.rs b/import/src/aggregate_tsm_schema/update_catalog.rs
index 8243ec16c0..f371df8abf 100644
--- a/import/src/aggregate_tsm_schema/update_catalog.rs
+++ b/import/src/aggregate_tsm_schema/update_catalog.rs
@@ -220,7 +220,6 @@ where
// column doesn't exist; add it
column_batch.push(ColumnUpsertRequest {
name: tag.name.as_str(),
- table_id: table.id,
column_type: ColumnType::Tag,
});
}
@@ -257,7 +256,6 @@ where
// column doesn't exist; add it
column_batch.push(ColumnUpsertRequest {
name: field.name.as_str(),
- table_id: table.id,
column_type: ColumnType::from(influx_column_type),
});
}
@@ -270,7 +268,10 @@ where
// that with short-lived loop variables.
// since this is a CLI tool rather than something called a lot on the write path, i
// figure it's okay.
- repos.columns().create_or_get_many(&column_batch).await?;
+ repos
+ .columns()
+ .create_or_get_many_unchecked(table.id, &column_batch)
+ .await?;
}
// create a partition for every day in the date range.
// N.B. this will need updating if we someday support partitioning by inputs other than
diff --git a/ingester/src/data.rs b/ingester/src/data.rs
index 6a2e1dd542..d962fca3ba 100644
--- a/ingester/src/data.rs
+++ b/ingester/src/data.rs
@@ -599,7 +599,7 @@ mod tests {
Arc::clone(&metrics),
));
- let schema = NamespaceSchema::new(namespace.id, topic.id, query_pool.id);
+ let schema = NamespaceSchema::new(namespace.id, topic.id, query_pool.id, 100);
let ignored_ts = Time::from_timestamp_millis(42);
@@ -681,7 +681,7 @@ mod tests {
Arc::clone(&metrics),
));
- let schema = NamespaceSchema::new(namespace.id, topic.id, query_pool.id);
+ let schema = NamespaceSchema::new(namespace.id, topic.id, query_pool.id, 100);
let w1 = DmlWrite::new(
"foo",
@@ -788,7 +788,7 @@ mod tests {
Arc::clone(&metrics),
));
- let schema = NamespaceSchema::new(namespace.id, topic.id, query_pool.id);
+ let schema = NamespaceSchema::new(namespace.id, topic.id, query_pool.id, 100);
let ignored_ts = Time::from_timestamp_millis(42);
@@ -1058,7 +1058,7 @@ mod tests {
Arc::clone(&metrics),
));
- let schema = NamespaceSchema::new(namespace.id, topic.id, query_pool.id);
+ let schema = NamespaceSchema::new(namespace.id, topic.id, query_pool.id, 100);
let ignored_ts = Time::from_timestamp_millis(42);
@@ -1174,7 +1174,7 @@ mod tests {
.await
.unwrap();
- let schema = NamespaceSchema::new(namespace.id, topic.id, query_pool.id);
+ let schema = NamespaceSchema::new(namespace.id, topic.id, query_pool.id, 100);
let ignored_ts = Time::from_timestamp_millis(42);
@@ -1357,7 +1357,7 @@ mod tests {
Arc::clone(&metrics),
));
- let schema = NamespaceSchema::new(namespace.id, topic.id, query_pool.id);
+ let schema = NamespaceSchema::new(namespace.id, topic.id, query_pool.id, 100);
let ignored_ts = Time::from_timestamp_millis(42);
diff --git a/ingester/src/handler.rs b/ingester/src/handler.rs
index 55c070464a..5c3ae23fba 100644
--- a/ingester/src/handler.rs
+++ b/ingester/src/handler.rs
@@ -456,6 +456,7 @@ mod tests {
ingester.namespace.id,
ingester.topic.id,
ingester.query_pool.id,
+ 100,
);
let mut txn = ingester.catalog.start_transaction().await.unwrap();
let ingest_ts1 = Time::from_timestamp_millis(42);
@@ -708,7 +709,12 @@ mod tests {
let write_buffer_state =
MockBufferSharedState::empty_with_n_shards(NonZeroU32::try_from(1).unwrap());
- let schema = NamespaceSchema::new(namespace.id, topic.id, query_pool.id);
+ let schema = NamespaceSchema::new(
+ namespace.id,
+ topic.id,
+ query_pool.id,
+ namespace.max_columns_per_table,
+ );
for write_operation in write_operations {
validate_or_insert_schema(write_operation.tables(), &schema, txn.deref_mut())
.await
diff --git a/iox_catalog/src/interface.rs b/iox_catalog/src/interface.rs
index 350fa026c7..8d78d33cfc 100644
--- a/iox_catalog/src/interface.rs
+++ b/iox_catalog/src/interface.rs
@@ -332,13 +332,11 @@ pub trait TableRepo: Send + Sync {
}
/// Parameters necessary to perform a batch insert of
-/// [`ColumnRepo::create_or_get()`].
+/// [`ColumnRepo::create_or_get()`] for one table (specified separately)
#[derive(Debug)]
pub struct ColumnUpsertRequest<'a> {
/// The name of the column.
pub name: &'a str,
- /// The table ID to which it belongs.
- pub table_id: TableId,
/// The data type of the column.
pub column_type: ColumnType,
}
@@ -361,8 +359,13 @@ pub trait ColumnRepo: Send + Sync {
/// Implementations make no guarantees as to the ordering or atomicity of
/// the batch of column upsert operations - a batch upsert may partially
/// commit, in which case an error MUST be returned by the implementation.
- async fn create_or_get_many(
+ ///
+ /// Per-namespace limits on the number of columns allowed per table are explicitly NOT checked
+ /// by this function, hence the name containing `unchecked`. It is expected that the caller
+ /// will check this first-- and yes, this is racy.
+ async fn create_or_get_many_unchecked(
&mut self,
+ table_id: TableId,
columns: &[ColumnUpsertRequest<'_>],
) -> Result<Vec<Column>>;
@@ -714,8 +717,12 @@ where
let columns = repos.columns().list_by_namespace_id(namespace.id).await?;
let tables = repos.tables().list_by_namespace_id(namespace.id).await?;
- let mut namespace =
- NamespaceSchema::new(namespace.id, namespace.topic_id, namespace.query_pool_id);
+ let mut namespace = NamespaceSchema::new(
+ namespace.id,
+ namespace.topic_id,
+ namespace.query_pool_id,
+ namespace.max_columns_per_table,
+ );
let mut table_id_to_schema = BTreeMap::new();
for t in tables {
@@ -846,7 +853,8 @@ pub async fn list_schemas(
// was created, or have no tables/columns (and therefore have no entry
// in "joined").
.filter_map(move |v| {
- let mut ns = NamespaceSchema::new(v.id, v.topic_id, v.query_pool_id);
+ let mut ns =
+ NamespaceSchema::new(v.id, v.topic_id, v.query_pool_id, v.max_columns_per_table);
ns.tables = joined.remove(&v.id)?;
Some((v, ns))
});
@@ -1179,14 +1187,7 @@ pub(crate) mod test_helpers {
.create_or_get("column_test", table.id, ColumnType::U64)
.await
.expect_err("should error with wrong column type");
- assert!(matches!(
- err,
- Error::ColumnTypeMismatch {
- name: _,
- existing: _,
- new: _
- }
- ));
+ assert!(matches!(err, Error::ColumnTypeMismatch { .. }));
// test that we can create a column of the same name under a different table
let table2 = repos
@@ -1201,23 +1202,6 @@ pub(crate) mod test_helpers {
.unwrap();
assert_ne!(c, ccc);
- let cols3 = repos
- .columns()
- .create_or_get_many(&[
- ColumnUpsertRequest {
- name: "a",
- table_id: table2.id,
- column_type: ColumnType::U64,
- },
- ColumnUpsertRequest {
- name: "a",
- table_id: table.id,
- column_type: ColumnType::U64,
- },
- ])
- .await
- .unwrap();
-
let columns = repos
.columns()
.list_by_namespace_id(namespace.id)
@@ -1225,12 +1209,11 @@ pub(crate) mod test_helpers {
.unwrap();
let mut want = vec![c.clone(), ccc];
- want.extend(cols3.clone());
assert_eq!(want, columns);
let columns = repos.columns().list_by_table_id(table.id).await.unwrap();
- let want2 = vec![c, cols3[1].clone()];
+ let want2 = vec![c];
assert_eq!(want2, columns);
// Add another tag column into table2
@@ -1252,7 +1235,7 @@ pub(crate) mod test_helpers {
},
ColumnTypeCount {
col_type: ColumnType::U64,
- count: 2,
+ count: 1,
},
];
expect.sort_by_key(|c| c.col_type);
@@ -1264,6 +1247,28 @@ pub(crate) mod test_helpers {
want.extend([c3]);
assert_eq!(list, want);
+ // test create_or_get_many_unchecked, below column limit
+ let table1_columns = repos
+ .columns()
+ .create_or_get_many_unchecked(
+ table.id,
+ &[
+ ColumnUpsertRequest {
+ name: "column_test",
+ column_type: ColumnType::Tag,
+ },
+ ColumnUpsertRequest {
+ name: "new_column",
+ column_type: ColumnType::Tag,
+ },
+ ],
+ )
+ .await
+ .unwrap();
+ let mut table1_column_names: Vec<_> = table1_columns.iter().map(|c| &c.name).collect();
+ table1_column_names.sort();
+ assert_eq!(table1_column_names, vec!["column_test", "new_column"]);
+
// test per-namespace column limits
repos
.namespaces()
@@ -1282,6 +1287,33 @@ pub(crate) mod test_helpers {
table_id: _,
}
));
+
+ // test per-namespace column limits are NOT enforced with create_or_get_many_unchecked
+ let table3 = repos
+ .tables()
+ .create_or_get("test_table_3", namespace.id)
+ .await
+ .unwrap();
+ let table3_columns = repos
+ .columns()
+ .create_or_get_many_unchecked(
+ table3.id,
+ &[
+ ColumnUpsertRequest {
+ name: "apples",
+ column_type: ColumnType::Tag,
+ },
+ ColumnUpsertRequest {
+ name: "oranges",
+ column_type: ColumnType::Tag,
+ },
+ ],
+ )
+ .await
+ .unwrap();
+ let mut table3_column_names: Vec<_> = table3_columns.iter().map(|c| &c.name).collect();
+ table3_column_names.sort();
+ assert_eq!(table3_column_names, vec!["apples", "oranges"]);
}
async fn test_shards(catalog: Arc<dyn Catalog>) {
@@ -3989,7 +4021,7 @@ pub(crate) mod test_helpers {
let batches = mutable_batch_lp::lines_to_batches(lines, 42).unwrap();
let batches = batches.iter().map(|(table, batch)| (table.as_str(), batch));
- let ns = NamespaceSchema::new(namespace.id, topic.id, pool.id);
+ let ns = NamespaceSchema::new(namespace.id, topic.id, pool.id, 1000);
let schema = validate_or_insert_schema(batches, &ns, repos)
.await
diff --git a/iox_catalog/src/lib.rs b/iox_catalog/src/lib.rs
index f0911ce20b..13e595a46b 100644
--- a/iox_catalog/src/lib.rs
+++ b/iox_catalog/src/lib.rs
@@ -144,43 +144,47 @@ where
// If the table itself needs to be updated during column validation it
// becomes a Cow::owned() copy and the modified copy should be inserted into
// the schema before returning.
- let mut column_batch = Vec::default();
- for (name, col) in mb.columns() {
- // Check if the column exists in the cached schema.
- //
- // If it does, validate it. If it does not exist, create it and insert
- // it into the cached schema.
- match table.columns.get(name.as_str()) {
- Some(existing) if existing.matches_type(col.influx_type()) => {
- // No action is needed as the column matches the existing column
- // schema.
- }
- Some(existing) => {
- // The column schema, and the column in the mutable batch are of
- // different types.
- return ColumnTypeMismatchSnafu {
- name,
- existing: existing.column_type,
- new: col.influx_type(),
+ let column_batch: Vec<_> = mb
+ .columns()
+ .filter_map(|(name, col)| {
+ // Check if the column exists in the cached schema.
+ //
+ // If it does, validate it. If it does not exist, create it and insert
+ // it into the cached schema.
+ match table.columns.get(name.as_str()) {
+ Some(existing) if existing.matches_type(col.influx_type()) => {
+ // No action is needed as the column matches the existing column
+ // schema.
+ None
+ }
+ Some(existing) => {
+ // The column schema, and the column in the mutable batch are of
+ // different types.
+ Some(
+ ColumnTypeMismatchSnafu {
+ name,
+ existing: existing.column_type,
+ new: col.influx_type(),
+ }
+ .fail(),
+ )
+ }
+ None => {
+ // The column does not exist in the cache, add it to the column
+ // batch to be bulk inserted later.
+ Some(Ok(ColumnUpsertRequest {
+ name: name.as_str(),
+ column_type: ColumnType::from(col.influx_type()),
+ }))
}
- .fail();
- }
- None => {
- // The column does not exist in the cache, add it to the column
- // batch to be bulk inserted later.
- column_batch.push(ColumnUpsertRequest {
- name: name.as_str(),
- table_id: table.id,
- column_type: ColumnType::from(col.influx_type()),
- });
}
- };
- }
+ })
+ .collect::<Result<Vec<_>>>()?;
if !column_batch.is_empty() {
repos
.columns()
- .create_or_get_many(&column_batch)
+ .create_or_get_many_unchecked(table.id, &column_batch)
.await?
.into_iter()
.for_each(|c| table.to_mut().add_column(&c));
@@ -270,6 +274,7 @@ mod tests {
namespace.id,
namespace.topic_id,
namespace.query_pool_id,
+ namespace.max_columns_per_table,
);
// Apply all the lp literals as individual writes, feeding
diff --git a/iox_catalog/src/mem.rs b/iox_catalog/src/mem.rs
index efdcd9c958..45be0e8838 100644
--- a/iox_catalog/src/mem.rs
+++ b/iox_catalog/src/mem.rs
@@ -522,17 +522,51 @@ impl ColumnRepo for MemTxn {
Ok(column.clone())
}
- async fn create_or_get_many(
+
+ async fn create_or_get_many_unchecked(
&mut self,
+ table_id: TableId,
columns: &[ColumnUpsertRequest<'_>],
) -> Result<Vec<Column>> {
- let mut out = Vec::new();
- for column in columns {
- out.push(
- ColumnRepo::create_or_get(self, column.name, column.table_id, column.column_type)
- .await?,
- );
- }
+ // Explicitly NOT using `create_or_get` in this function: the Postgres catalog doesn't
+ // check column limits when inserting many columns because it's complicated and expensive,
+ // and for testing purposes the in-memory catalog needs to match its functionality.
+
+ let stage = self.stage();
+
+ let out: Vec<_> = columns
+ .iter()
+ .map(|column| {
+ match stage
+ .columns
+ .iter()
+ .find(|t| t.name == column.name && t.table_id == table_id)
+ {
+ Some(c) => {
+ ensure!(
+ column.column_type == c.column_type,
+ ColumnTypeMismatchSnafu {
+ name: column.name,
+ existing: c.column_type,
+ new: column.column_type
+ }
+ );
+ Ok(c.clone())
+ }
+ None => {
+ let new_column = Column {
+ id: ColumnId::new(stage.columns.len() as i64 + 1),
+ table_id,
+ name: column.name.to_string(),
+ column_type: column.column_type,
+ };
+ stage.columns.push(new_column);
+ Ok(stage.columns.last().unwrap().clone())
+ }
+ }
+ })
+ .collect::<Result<Vec<Column>>>()?;
+
Ok(out)
}
diff --git a/iox_catalog/src/metrics.rs b/iox_catalog/src/metrics.rs
index 15c32af062..cc0cde6735 100644
--- a/iox_catalog/src/metrics.rs
+++ b/iox_catalog/src/metrics.rs
@@ -219,7 +219,7 @@ decorate!(
"column_create_or_get" = create_or_get(&mut self, name: &str, table_id: TableId, column_type: ColumnType) -> Result<Column>;
"column_list_by_namespace_id" = list_by_namespace_id(&mut self, namespace_id: NamespaceId) -> Result<Vec<Column>>;
"column_list_by_table_id" = list_by_table_id(&mut self, table_id: TableId) -> Result<Vec<Column>>;
- "column_create_or_get_many" = create_or_get_many(&mut self, columns: &[ColumnUpsertRequest<'_>]) -> Result<Vec<Column>>;
+ "column_create_or_get_many_unchecked" = create_or_get_many_unchecked(&mut self, table_id: TableId, columns: &[ColumnUpsertRequest<'_>]) -> Result<Vec<Column>>;
"column_list" = list(&mut self) -> Result<Vec<Column>>;
"column_list_type_count_by_table_id" = list_type_count_by_table_id(&mut self, table_id: TableId) -> Result<Vec<ColumnTypeCount>>;
]
diff --git a/iox_catalog/src/postgres.rs b/iox_catalog/src/postgres.rs
index dd34d2087e..7561303d0e 100644
--- a/iox_catalog/src/postgres.rs
+++ b/iox_catalog/src/postgres.rs
@@ -939,31 +939,30 @@ WHERE table_id = $1;
Ok(rec)
}
- async fn create_or_get_many(
+ async fn create_or_get_many_unchecked(
&mut self,
+ table_id: TableId,
columns: &[ColumnUpsertRequest<'_>],
) -> Result<Vec<Column>> {
let mut v_name = Vec::new();
- let mut v_table_id = Vec::new();
let mut v_column_type = Vec::new();
for c in columns {
v_name.push(c.name.to_string());
- v_table_id.push(c.table_id.get());
v_column_type.push(c.column_type as i16);
}
let out = sqlx::query_as::<_, Column>(
r#"
INSERT INTO column_name ( name, table_id, column_type )
-SELECT name, table_id, column_type FROM UNNEST($1, $2, $3) as a(name, table_id, column_type)
+SELECT name, $1, column_type FROM UNNEST($2, $3) as a(name, column_type)
ON CONFLICT ON CONSTRAINT column_name_unique
DO UPDATE SET name = column_name.name
RETURNING *;
"#,
)
- .bind(&v_name)
- .bind(&v_table_id)
- .bind(&v_column_type)
+ .bind(&table_id) // $1
+ .bind(&v_name) // $2
+ .bind(&v_column_type) // $3
.fetch_all(&mut self.inner)
.await
.map_err(|e| {
@@ -2622,7 +2621,7 @@ mod tests {
assert_eq!(application_name, TEST_APPLICATION_NAME_NEW);
}
- macro_rules! test_column_create_or_get_many {
+ macro_rules! test_column_create_or_get_many_unchecked {
(
$name:ident,
calls = {$([$($col_name:literal => $col_type:expr),+ $(,)?]),+},
@@ -2630,7 +2629,7 @@ mod tests {
) => {
paste::paste! {
#[tokio::test]
- async fn [<test_column_create_or_get_many_ $name>]() {
+ async fn [<test_column_create_or_get_many_unchecked_ $name>]() {
// If running an integration test on your laptop, this requires that you have
// Postgres running and that you've done the sqlx migrations. See the README in
// this crate for info to set it up.
@@ -2668,7 +2667,6 @@ mod tests {
$(
ColumnUpsertRequest {
name: $col_name,
- table_id,
column_type: $col_type,
},
)+
@@ -2677,7 +2675,7 @@ mod tests {
.repositories()
.await
.columns()
- .create_or_get_many(&insert)
+ .create_or_get_many_unchecked(table_id, &insert)
.await;
// The returned columns MUST always match the requested
@@ -2686,13 +2684,13 @@ mod tests {
assert_eq!(insert.len(), got.len());
insert.iter().zip(got).for_each(|(req, got)| {
assert_eq!(req.name, got.name);
- assert_eq!(req.table_id, got.table_id);
+ assert_eq!(table_id, got.table_id);
assert_eq!(
req.column_type,
ColumnType::try_from(got.column_type).expect("invalid column type")
);
});
- assert_metric_hit(&metrics, "column_create_or_get_many");
+ assert_metric_hit(&metrics, "column_create_or_get_many_unchecked");
}
)+
@@ -2704,7 +2702,7 @@ mod tests {
// Issue a few calls to create_or_get_many that contain distinct columns and
// covers the full set of column types.
- test_column_create_or_get_many!(
+ test_column_create_or_get_many_unchecked!(
insert,
calls = {
[
@@ -2726,7 +2724,7 @@ mod tests {
// Issue two calls with overlapping columns - request should succeed (upsert
// semantics).
- test_column_create_or_get_many!(
+ test_column_create_or_get_many_unchecked!(
partial_upsert,
calls = {
[
@@ -2750,7 +2748,7 @@ mod tests {
);
// Issue two calls with the same columns and types.
- test_column_create_or_get_many!(
+ test_column_create_or_get_many_unchecked!(
full_upsert,
calls = {
[
@@ -2771,7 +2769,7 @@ mod tests {
// Issue two calls with overlapping columns with conflicting types and
// observe a correctly populated ColumnTypeMismatch error.
- test_column_create_or_get_many!(
+ test_column_create_or_get_many_unchecked!(
partial_type_conflict,
calls = {
[
@@ -2802,7 +2800,7 @@ mod tests {
// Issue one call containing a column specified twice, with differing types
// and observe an error different from the above test case.
- test_column_create_or_get_many!(
+ test_column_create_or_get_many_unchecked!(
intra_request_type_conflict,
calls = {
[
diff --git a/iox_tests/src/util.rs b/iox_tests/src/util.rs
index 19975138f8..cb3dae6614 100644
--- a/iox_tests/src/util.rs
+++ b/iox_tests/src/util.rs
@@ -265,6 +265,16 @@ impl TestNamespace {
.await
.unwrap()
}
+
+ /// Set the number of columns per table allowed in this namespace.
+ pub async fn update_column_limit(&self, new_max: i32) {
+ let mut repos = self.catalog.catalog.repositories().await;
+ repos
+ .namespaces()
+ .update_column_limit(&self.namespace.name, new_max)
+ .await
+ .unwrap();
+ }
}
/// A test shard with its namespace in the catalog
diff --git a/mutable_batch/src/lib.rs b/mutable_batch/src/lib.rs
index cf2eb3837a..817378b237 100644
--- a/mutable_batch/src/lib.rs
+++ b/mutable_batch/src/lib.rs
@@ -25,7 +25,7 @@ use iox_time::Time;
use schema::selection::Selection;
use schema::{builder::SchemaBuilder, Schema, TIME_COLUMN_NAME};
use snafu::{OptionExt, ResultExt, Snafu};
-use std::ops::Range;
+use std::{collections::BTreeSet, ops::Range};
pub mod column;
pub mod payload;
@@ -137,6 +137,12 @@ impl MutableBatch {
.map(move |(name, idx)| (name, &self.columns[*idx]))
}
+ /// Return the set of column names for this table. Used in combination with a write operation's
+ /// column names to determine whether a write would exceed the max allowed columns.
+ pub fn column_names(&self) -> BTreeSet<&str> {
+ self.column_names.keys().map(|name| name.as_str()).collect()
+ }
+
/// Return the number of rows in this chunk
pub fn rows(&self) -> usize {
self.row_count
diff --git a/router/Cargo.toml b/router/Cargo.toml
index e26d49b6cd..8f7798f8f7 100644
--- a/router/Cargo.toml
+++ b/router/Cargo.toml
@@ -43,6 +43,7 @@ write_summary = { path = "../write_summary" }
[dev-dependencies]
assert_matches = "1.5"
criterion = { version = "0.4", default-features = false, features = ["async_tokio", "rayon"]}
+iox_tests = { path = "../iox_tests" }
once_cell = "1"
paste = "1.0.9"
pretty_assertions = "1.3.0"
diff --git a/router/src/dml_handlers/ns_autocreation.rs b/router/src/dml_handlers/ns_autocreation.rs
index 9bc9bc00b8..81e5ebb608 100644
--- a/router/src/dml_handlers/ns_autocreation.rs
+++ b/router/src/dml_handlers/ns_autocreation.rs
@@ -149,6 +149,7 @@ mod tests {
topic_id: TopicId::new(2),
query_pool_id: QueryPoolId::new(3),
tables: Default::default(),
+ max_columns_per_table: 4,
},
);
diff --git a/router/src/dml_handlers/schema_validation.rs b/router/src/dml_handlers/schema_validation.rs
index d314aaeffc..c84c5bf75d 100644
--- a/router/src/dml_handlers/schema_validation.rs
+++ b/router/src/dml_handlers/schema_validation.rs
@@ -1,7 +1,7 @@
use super::DmlHandler;
use crate::namespace_cache::{metrics::InstrumentedCache, MemoryNamespaceCache, NamespaceCache};
use async_trait::async_trait;
-use data_types::{DatabaseName, DeletePredicate};
+use data_types::{DatabaseName, DeletePredicate, NamespaceSchema};
use hashbrown::HashMap;
use iox_catalog::{
interface::{get_schema_by_name, Catalog, Error as CatalogError},
@@ -23,7 +23,7 @@ pub enum SchemaError {
/// The user has hit their column/table limit.
#[error("service limit reached: {0}")]
- ServiceLimit(iox_catalog::interface::Error),
+ ServiceLimit(Box<dyn std::error::Error + Send + Sync + 'static>),
/// The request schema conflicts with the existing namespace schema.
#[error("schema conflict: {0}")]
@@ -67,6 +67,22 @@ pub enum SchemaError {
/// relatively rare - it results in additional requests being made to the
/// catalog until the cached schema converges to match the catalog schema.
///
+/// Note that the namespace-wide limit of the number of columns allowed per table
+/// is also cached, which has two implications:
+///
+/// 1. If the namespace's column limit is updated in the catalog, the new limit
+/// will not be enforced until the whole namespace is recached, likely only
+/// on startup. In other words, updating the namespace's column limit requires
+/// both a catalog update and service restart.
+/// 2. There's a race condition that can result in a table ending up with more
+/// columns than the namespace limit should allow. When multiple concurrent
+/// writes come in to different service instances that each have their own
+/// cache, and each of those writes add a disjoint set of new columns, the
+/// requests will all succeed because when considered separately, they do
+/// not exceed the number of columns in the cache. Once all the writes have
+/// completed, the total set of columns in the table will be some multiple
+/// of the limit.
+///
/// # Correctness
///
/// The correct functioning of this schema validator relies on the catalog
@@ -178,6 +194,12 @@ where
}
};
+ validate_column_limits(&batches, &schema).map_err(|e| {
+ warn!(%namespace, error=%e, "service protection limit reached");
+ self.service_limit_hit.inc(1);
+ SchemaError::ServiceLimit(Box::new(e))
+ })?;
+
let maybe_new_schema = validate_or_insert_schema(
batches.iter().map(|(k, v)| (k.as_str(), v)),
&schema,
@@ -208,7 +230,7 @@ where
| CatalogError::TableCreateLimitError { .. } => {
warn!(%namespace, error=%e, "service protection limit reached");
self.service_limit_hit.inc(1);
- SchemaError::ServiceLimit(e.into_err())
+ SchemaError::ServiceLimit(Box::new(e.into_err()))
}
_ => {
error!(%namespace, error=%e, "schema validation failed");
@@ -253,17 +275,221 @@ where
}
}
+#[derive(Debug, Error)]
+#[error(
+ "couldn't create columns in table `{table_name}`; table contains \
+ {existing_column_count} existing columns, applying this write would result \
+ in {merged_column_count} columns, limit is {max_columns_per_table}"
+)]
+struct OverColumnLimit {
+ table_name: String,
+ // Number of columns already in the table.
+ existing_column_count: usize,
+ // Number of resultant columns after merging the write with existing columns.
+ merged_column_count: usize,
+ // The configured limit.
+ max_columns_per_table: usize,
+}
+
+fn validate_column_limits(
+ batches: &HashMap<String, MutableBatch>,
+ schema: &NamespaceSchema,
+) -> Result<(), OverColumnLimit> {
+ for (table_name, batch) in batches {
+ let mut existing_columns = schema
+ .tables
+ .get(table_name)
+ .map(|t| t.column_names())
+ .unwrap_or_default();
+ let existing_column_count = existing_columns.len();
+
+ let merged_column_count = {
+ existing_columns.append(&mut batch.column_names());
+ existing_columns.len()
+ };
+
+ // If the table is currently over the column limit but this write only includes existing
+ // columns and doesn't exceed the limit more, this is allowed.
+ let columns_were_added_in_this_batch = merged_column_count > existing_column_count;
+ let column_limit_exceeded = merged_column_count > schema.max_columns_per_table;
+
+ if columns_were_added_in_this_batch && column_limit_exceeded {
+ return Err(OverColumnLimit {
+ table_name: table_name.into(),
+ merged_column_count,
+ existing_column_count,
+ max_columns_per_table: schema.max_columns_per_table,
+ });
+ }
+ }
+
+ Ok(())
+}
+
#[cfg(test)]
mod tests {
use super::*;
use assert_matches::assert_matches;
- use data_types::{ColumnType, QueryPoolId, TimestampRange, TopicId};
- use iox_catalog::mem::MemCatalog;
+ use data_types::{ColumnType, TimestampRange};
+ use iox_tests::util::{TestCatalog, TestNamespace};
use once_cell::sync::Lazy;
use std::sync::Arc;
static NAMESPACE: Lazy<DatabaseName<'static>> = Lazy::new(|| "bananas".try_into().unwrap());
+ #[tokio::test]
+ async fn validate_limits() {
+ let (catalog, namespace) = test_setup().await;
+
+ namespace.update_column_limit(3).await;
+
+ // Table not found in schema,
+ {
+ let schema = namespace.schema().await;
+ // Columns under the limit is ok
+ let batches = lp_to_writes("nonexistent val=42i 123456");
+ assert!(validate_column_limits(&batches, &schema).is_ok());
+ // Columns over the limit is an error
+ let batches = lp_to_writes("nonexistent,tag1=A,tag2=B val=42i 123456");
+ assert_matches!(
+ validate_column_limits(&batches, &schema),
+ Err(OverColumnLimit {
+ table_name: _,
+ existing_column_count: 0,
+ merged_column_count: 4,
+ max_columns_per_table: 3,
+ })
+ );
+ }
+
+ // Table exists but no columns in schema,
+ {
+ namespace.create_table("no_columns_in_schema").await;
+ let schema = namespace.schema().await;
+ // Columns under the limit is ok
+ let batches = lp_to_writes("no_columns_in_schema val=42i 123456");
+ assert!(validate_column_limits(&batches, &schema).is_ok());
+ // Columns over the limit is an error
+ let batches = lp_to_writes("no_columns_in_schema,tag1=A,tag2=B val=42i 123456");
+ assert_matches!(
+ validate_column_limits(&batches, &schema),
+ Err(OverColumnLimit {
+ table_name: _,
+ existing_column_count: 0,
+ merged_column_count: 4,
+ max_columns_per_table: 3,
+ })
+ );
+ }
+
+ // Table exists with a column in the schema,
+ {
+ let table = namespace.create_table("i_got_columns").await;
+ table.create_column("i_got_music", ColumnType::I64).await;
+ let schema = namespace.schema().await;
+ // Columns already existing is ok
+ let batches = lp_to_writes("i_got_columns i_got_music=42i 123456");
+ assert!(validate_column_limits(&batches, &schema).is_ok());
+ // Adding columns under the limit is ok
+ let batches = lp_to_writes("i_got_columns,tag1=A i_got_music=42i 123456");
+ assert!(validate_column_limits(&batches, &schema).is_ok());
+ // Adding columns over the limit is an error
+ let batches = lp_to_writes("i_got_columns,tag1=A,tag2=B i_got_music=42i 123456");
+ assert_matches!(
+ validate_column_limits(&batches, &schema),
+ Err(OverColumnLimit {
+ table_name: _,
+ existing_column_count: 1,
+ merged_column_count: 4,
+ max_columns_per_table: 3,
+ })
+ );
+ }
+
+ // Table exists and is at the column limit,
+ {
+ let table = namespace.create_table("bananas").await;
+ table.create_column("greatness", ColumnType::I64).await;
+ table.create_column("tastiness", ColumnType::I64).await;
+ table
+ .create_column(schema::TIME_COLUMN_NAME, ColumnType::Time)
+ .await;
+ let schema = namespace.schema().await;
+ // Columns already existing is allowed
+ let batches = lp_to_writes("bananas greatness=42i 123456");
+ assert!(validate_column_limits(&batches, &schema).is_ok());
+ // Adding columns over the limit is an error
+ let batches = lp_to_writes("bananas i_got_music=42i 123456");
+ assert_matches!(
+ validate_column_limits(&batches, &schema),
+ Err(OverColumnLimit {
+ table_name: _,
+ existing_column_count: 3,
+ merged_column_count: 4,
+ max_columns_per_table: 3,
+ })
+ );
+ }
+
+ // Table exists and is over the column limit because of the race condition,
+ {
+ // Make two schema validator instances each with their own cache
+ let handler1 = SchemaValidator::new(
+ catalog.catalog(),
+ Arc::new(MemoryNamespaceCache::default()),
+ &catalog.metric_registry,
+ );
+ let handler2 = SchemaValidator::new(
+ catalog.catalog(),
+ Arc::new(MemoryNamespaceCache::default()),
+ &catalog.metric_registry,
+ );
+
+ // Make a valid write with one column + timestamp through each validator so the
+ // namespace schema gets cached
+ let writes1_valid = lp_to_writes("dragonfruit val=42i 123456");
+ handler1
+ .write(&*NAMESPACE, writes1_valid, None)
+ .await
+ .expect("request should succeed");
+ let writes2_valid = lp_to_writes("dragonfruit val=43i 123457");
+ handler2
+ .write(&*NAMESPACE, writes2_valid, None)
+ .await
+ .expect("request should succeed");
+
+ // Make "valid" writes through each validator that each add a different column, thus
+ // putting the table over the limit
+ let writes1_add_column = lp_to_writes("dragonfruit,tag1=A val=42i 123456");
+ handler1
+ .write(&*NAMESPACE, writes1_add_column, None)
+ .await
+ .expect("request should succeed");
+ let writes2_add_column = lp_to_writes("dragonfruit,tag2=B val=43i 123457");
+ handler2
+ .write(&*NAMESPACE, writes2_add_column, None)
+ .await
+ .expect("request should succeed");
+
+ let schema = namespace.schema().await;
+
+ // Columns already existing is allowed
+ let batches = lp_to_writes("dragonfruit val=42i 123456");
+ assert!(validate_column_limits(&batches, &schema).is_ok());
+ // Adding more columns over the limit is an error
+ let batches = lp_to_writes("dragonfruit i_got_music=42i 123456");
+ assert_matches!(
+ validate_column_limits(&batches, &schema),
+ Err(OverColumnLimit {
+ table_name: _,
+ existing_column_count: 4,
+ merged_column_count: 5,
+ max_columns_per_table: 3,
+ })
+ );
+ }
+ }
+
// Parse `lp` into a table-keyed MutableBatch map.
fn lp_to_writes(lp: &str) -> HashMap<String, MutableBatch> {
let (writes, _) = mutable_batch_lp::lines_to_batches_stats(lp, 42)
@@ -273,23 +499,11 @@ mod tests {
/// Initialise an in-memory [`MemCatalog`] and create a single namespace
/// named [`NAMESPACE`].
- async fn create_catalog() -> Arc<dyn Catalog> {
- let metrics = Arc::new(metric::Registry::new());
- let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(metrics));
-
- let mut repos = catalog.repositories().await;
- repos
- .namespaces()
- .create(
- NAMESPACE.as_str(),
- "inf",
- TopicId::new(42),
- QueryPoolId::new(24),
- )
- .await
- .expect("failed to create test namespace");
+ async fn test_setup() -> (Arc<TestCatalog>, Arc<TestNamespace>) {
+ let catalog = TestCatalog::new();
+ let namespace = catalog.create_namespace(&NAMESPACE).await;
- catalog
+ (catalog, namespace)
}
fn assert_cache<C>(handler: &SchemaValidator<C>, table: &str, col: &str, want: ColumnType)
@@ -314,10 +528,10 @@ mod tests {
#[tokio::test]
async fn test_write_ok() {
- let catalog = create_catalog().await;
+ let (catalog, _namespace) = test_setup().await;
let metrics = Arc::new(metric::Registry::default());
let handler = SchemaValidator::new(
- catalog,
+ catalog.catalog(),
Arc::new(MemoryNamespaceCache::default()),
&*metrics,
);
@@ -337,10 +551,10 @@ mod tests {
#[tokio::test]
async fn test_write_schema_not_found() {
- let catalog = create_catalog().await;
+ let (catalog, _namespace) = test_setup().await;
let metrics = Arc::new(metric::Registry::default());
let handler = SchemaValidator::new(
- catalog,
+ catalog.catalog(),
Arc::new(MemoryNamespaceCache::default()),
&*metrics,
);
@@ -361,10 +575,10 @@ mod tests {
#[tokio::test]
async fn test_write_validation_failure() {
- let catalog = create_catalog().await;
+ let (catalog, _namespace) = test_setup().await;
let metrics = Arc::new(metric::Registry::default());
let handler = SchemaValidator::new(
- catalog,
+ catalog.catalog(),
Arc::new(MemoryNamespaceCache::default()),
&*metrics,
);
@@ -399,10 +613,10 @@ mod tests {
#[tokio::test]
async fn test_write_table_service_limit() {
- let catalog = create_catalog().await;
+ let (catalog, _namespace) = test_setup().await;
let metrics = Arc::new(metric::Registry::default());
let handler = SchemaValidator::new(
- Arc::clone(&catalog),
+ catalog.catalog(),
Arc::new(MemoryNamespaceCache::default()),
&*metrics,
);
@@ -417,6 +631,7 @@ mod tests {
// Configure the service limit to be hit next request
catalog
+ .catalog()
.repositories()
.await
.namespaces()
@@ -437,10 +652,10 @@ mod tests {
#[tokio::test]
async fn test_write_column_service_limit() {
- let catalog = create_catalog().await;
+ let (catalog, namespace) = test_setup().await;
let metrics = Arc::new(metric::Registry::default());
let handler = SchemaValidator::new(
- Arc::clone(&catalog),
+ catalog.catalog(),
Arc::new(MemoryNamespaceCache::default()),
&*metrics,
);
@@ -453,16 +668,46 @@ mod tests {
.expect("request should succeed");
assert_eq!(writes.len(), got.len());
+ // Configure the service limit to be hit next request
+ namespace.update_column_limit(1).await;
+ let handler = SchemaValidator::new(
+ catalog.catalog(),
+ Arc::new(MemoryNamespaceCache::default()),
+ &*metrics,
+ );
+
+ // Second write attempts to violate limits, causing an error
+ let writes = lp_to_writes("bananas,tag1=A,tag2=B val=42i,val2=42i 123456");
+ let err = handler
+ .write(&*NAMESPACE, writes, None)
+ .await
+ .expect_err("request should fail");
+
+ assert_matches!(err, SchemaError::ServiceLimit(_));
+ assert_eq!(1, handler.service_limit_hit.fetch());
+ }
+
+ #[tokio::test]
+ async fn test_first_write_many_columns_service_limit() {
+ let (catalog, _namespace) = test_setup().await;
+ let metrics = Arc::new(metric::Registry::default());
+ let handler = SchemaValidator::new(
+ catalog.catalog(),
+ Arc::new(MemoryNamespaceCache::default()),
+ &*metrics,
+ );
+
// Configure the service limit to be hit next request
catalog
+ .catalog()
.repositories()
.await
.namespaces()
- .update_column_limit(NAMESPACE.as_str(), 1)
+ .update_column_limit(NAMESPACE.as_str(), 3)
.await
.expect("failed to set column limit");
- // Second write attempts to violate limits, causing an error
+ // First write attempts to add columns over the limit, causing an error
let writes = lp_to_writes("bananas,tag1=A,tag2=B val=42i,val2=42i 123456");
let err = handler
.write(&*NAMESPACE, writes, None)
@@ -478,10 +723,10 @@ mod tests {
const NAMESPACE: &str = "NAMESPACE_IS_NOT_VALIDATED";
const TABLE: &str = "bananas";
- let catalog = create_catalog().await;
+ let (catalog, _namespace) = test_setup().await;
let metrics = Arc::new(metric::Registry::default());
let handler = SchemaValidator::new(
- catalog,
+ catalog.catalog(),
Arc::new(MemoryNamespaceCache::default()),
&*metrics,
);
diff --git a/router/src/namespace_cache/memory.rs b/router/src/namespace_cache/memory.rs
index 602f4a3c4c..ab435f3170 100644
--- a/router/src/namespace_cache/memory.rs
+++ b/router/src/namespace_cache/memory.rs
@@ -42,6 +42,7 @@ mod tests {
topic_id: TopicId::new(24),
query_pool_id: QueryPoolId::new(1234),
tables: Default::default(),
+ max_columns_per_table: 50,
};
assert!(cache.put_schema(ns.clone(), schema1.clone()).is_none());
assert_eq!(*cache.get_schema(&ns).expect("lookup failure"), schema1);
@@ -51,6 +52,7 @@ mod tests {
topic_id: TopicId::new(2),
query_pool_id: QueryPoolId::new(2),
tables: Default::default(),
+ max_columns_per_table: 10,
};
assert_eq!(
diff --git a/router/src/namespace_cache/metrics.rs b/router/src/namespace_cache/metrics.rs
index 57ed04bc44..34f1b5842e 100644
--- a/router/src/namespace_cache/metrics.rs
+++ b/router/src/namespace_cache/metrics.rs
@@ -194,6 +194,7 @@ mod tests {
topic_id: TopicId::new(24),
query_pool_id: QueryPoolId::new(1234),
tables,
+ max_columns_per_table: 100,
}
}
diff --git a/router/src/namespace_cache/sharded_cache.rs b/router/src/namespace_cache/sharded_cache.rs
index 235cd139ae..64b637d59c 100644
--- a/router/src/namespace_cache/sharded_cache.rs
+++ b/router/src/namespace_cache/sharded_cache.rs
@@ -60,6 +60,7 @@ mod tests {
topic_id: TopicId::new(1),
query_pool_id: QueryPoolId::new(1),
tables: Default::default(),
+ max_columns_per_table: 7,
}
}
diff --git a/router/tests/http.rs b/router/tests/http.rs
index 510937d8d8..234761b3c1 100644
--- a/router/tests/http.rs
+++ b/router/tests/http.rs
@@ -330,16 +330,13 @@ async fn test_schema_limit() {
&err,
router::server::http::Error::DmlHandler(
DmlError::Schema(
- SchemaError::ServiceLimit(
- iox_catalog::interface::Error::TableCreateLimitError {
- table_name,
- namespace_id,
- }
- )
+ SchemaError::ServiceLimit(e)
)
) => {
- assert_eq!(table_name, "platanos2");
- assert_eq!(namespace_id.to_string(), "1");
+ assert_eq!(
+ e.to_string(),
+ "couldn't create table platanos2; limit reached on namespace 1"
+ );
}
);
assert_eq!(err.as_status_code(), StatusCode::TOO_MANY_REQUESTS);
|
3114c67cf12e2004fbd1443902aeb15510a3d32d
|
Dom Dwyer
|
2023-05-11 15:54:24
|
persisted Parquet file attribute metrics
|
Implements a PersistCompletionObserver that records various attributes
of the generated and persisted Parquet file as histogram metrics to
capture the distribution of values:
* File size
* Row count
* Column count
* Time range of data (max - min timestamp)
These metrics will give us insight into the generated files instead of
relying on intuition when tuning various configuration parameters.
| null |
feat: persisted Parquet file attribute metrics
Implements a PersistCompletionObserver that records various attributes
of the generated and persisted Parquet file as histogram metrics to
capture the distribution of values:
* File size
* Row count
* Column count
* Time range of data (max - min timestamp)
These metrics will give us insight into the generated files instead of
relying on intuition when tuning various configuration parameters.
|
diff --git a/ingester/src/persist/file_metrics.rs b/ingester/src/persist/file_metrics.rs
new file mode 100644
index 0000000000..d12024b43f
--- /dev/null
+++ b/ingester/src/persist/file_metrics.rs
@@ -0,0 +1,228 @@
+use std::{sync::Arc, time::Duration};
+
+use async_trait::async_trait;
+use metric::{
+ DurationHistogram, DurationHistogramOptions, U64Histogram, U64HistogramOptions, DURATION_MAX,
+};
+
+use super::completion_observer::{CompletedPersist, PersistCompletionObserver};
+
+const MINUTES: Duration = Duration::from_secs(60);
+
+#[derive(Debug)]
+pub(crate) struct ParquetFileInstrumentation<T> {
+ inner: T,
+
+ row_count: U64Histogram,
+ column_count: U64Histogram,
+ file_size_bytes: U64Histogram,
+ file_time_range: DurationHistogram,
+}
+
+impl<T> ParquetFileInstrumentation<T> {
+ pub(crate) fn new(inner: T, metrics: &metric::Registry) -> Self {
+ // A metric capturing the duration difference between min & max
+ // timestamps.
+ let file_time_range: DurationHistogram = metrics
+ .register_metric_with_options::<DurationHistogram, _>(
+ "ingester_persist_parquet_file_time_range",
+ "range from min to max timestamp in output parquet file",
+ || {
+ DurationHistogramOptions::new([
+ 30 * MINUTES, // 30m
+ 60 * MINUTES, // 1h
+ 120 * MINUTES, // 2h
+ 240 * MINUTES, // 4h
+ 480 * MINUTES, // 8h
+ 960 * MINUTES, // 16h
+ 1_920 * MINUTES, // 32h
+ DURATION_MAX,
+ ])
+ },
+ )
+ .recorder(&[]);
+
+ // File size distribution.
+ let file_size_bytes: U64Histogram = metrics
+ .register_metric_with_options::<U64Histogram, _>(
+ "ingester_persist_parquet_file_size_bytes",
+ "distribution of output parquet file size in bytes",
+ || {
+ U64HistogramOptions::new([
+ 4_u64.pow(5), // 1 kibibyte
+ 4_u64.pow(6), // 4 kibibytes
+ 4_u64.pow(7), // 16 kibibytes
+ 4_u64.pow(8), // 64 kibibytes
+ 4_u64.pow(9), // 256 kibibytes
+ 4_u64.pow(10), // 1 mebibyte
+ 4_u64.pow(11), // 4 mebibytes
+ 4_u64.pow(12), // 16 mebibytes
+ 4_u64.pow(13), // 64 mebibytes
+ 4_u64.pow(14), // 256 mebibytes
+ 4_u64.pow(15), // 1 gibibyte
+ 4_u64.pow(16), // 4 gibibytes
+ u64::MAX,
+ ])
+ },
+ )
+ .recorder(&[]);
+
+ // Row count distribution.
+ let row_count: U64Histogram = metrics
+ .register_metric_with_options::<U64Histogram, _>(
+ "ingester_persist_parquet_file_row_count",
+ "distribution of row count in output parquet files",
+ || {
+ U64HistogramOptions::new([
+ 4_u64.pow(3), // 64
+ 4_u64.pow(4), // 256
+ 4_u64.pow(5), // 1,024
+ 4_u64.pow(6), // 4,096
+ 4_u64.pow(7), // 16,384
+ 4_u64.pow(8), // 65,536
+ 4_u64.pow(9), // 262,144
+ 4_u64.pow(10), // 1,048,576
+ 4_u64.pow(11), // 4,194,304
+ 4_u64.pow(12), // 16,777,216
+ u64::MAX,
+ ])
+ },
+ )
+ .recorder(&[]);
+
+ // Column count distribution.
+ //
+ // Because the column count is, by default, limited per table, this
+ // range should exceed that limit by some degree to discover overshoot
+ // (limits are eventually consistent) and correctly measure workloads
+ // that have been configured with a higher limit.
+ let column_count: U64Histogram = metrics
+ .register_metric_with_options::<U64Histogram, _>(
+ "ingester_persist_parquet_file_column_count",
+ "distribution of column count in output parquet files",
+ || {
+ U64HistogramOptions::new([
+ 2_u64.pow(1), // 2
+ 2_u64.pow(2), // 4
+ 2_u64.pow(3), // 8
+ 2_u64.pow(4), // 16
+ 2_u64.pow(5), // 32
+ 2_u64.pow(6), // 64
+ 2_u64.pow(7), // 128
+ 2_u64.pow(8), // 256
+ 2_u64.pow(9), // 512
+ 2_u64.pow(10), // 1,024
+ 2_u64.pow(11), // 2,048
+ u64::MAX,
+ ])
+ },
+ )
+ .recorder(&[]);
+
+ Self {
+ inner,
+ row_count,
+ column_count,
+ file_size_bytes,
+ file_time_range,
+ }
+ }
+}
+
+#[async_trait]
+impl<T> PersistCompletionObserver for ParquetFileInstrumentation<T>
+where
+ T: PersistCompletionObserver,
+{
+ async fn persist_complete(&self, note: Arc<CompletedPersist>) {
+ // Observe the persistence notification values.
+ self.row_count.record(note.row_count() as _);
+ self.column_count.record(note.column_count() as _);
+ self.file_size_bytes.record(note.parquet_file_bytes() as _);
+ self.file_time_range.record(note.timestamp_range());
+
+ // Forward on the notification to the next handler.
+ self.inner.persist_complete(note).await;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use data_types::{
+ sequence_number_set::SequenceNumberSet, ColumnId, ColumnSet, NamespaceId,
+ ParquetFileParams, PartitionId, TableId, Timestamp,
+ };
+ use metric::assert_histogram;
+
+ use crate::persist::completion_observer::mock::MockCompletionObserver;
+
+ use super::*;
+
+ const NAMESPACE_ID: NamespaceId = NamespaceId::new(1);
+ const TABLE_ID: TableId = TableId::new(1);
+ const PARTITION_ID: PartitionId = PartitionId::new(1);
+
+ #[tokio::test]
+ async fn test_persisted_file_metrics() {
+ let inner = Arc::new(MockCompletionObserver::default());
+
+ let metrics = metric::Registry::default();
+ let decorator = ParquetFileInstrumentation::new(Arc::clone(&inner), &metrics);
+
+ let meta = ParquetFileParams {
+ namespace_id: NAMESPACE_ID,
+ table_id: TABLE_ID,
+ partition_id: PARTITION_ID,
+ object_store_id: Default::default(),
+ min_time: Timestamp::new(Duration::from_secs(1_000).as_nanos() as _),
+ max_time: Timestamp::new(Duration::from_secs(1_042).as_nanos() as _), // 42 seconds later
+ file_size_bytes: 42424242,
+ row_count: 24,
+ compaction_level: data_types::CompactionLevel::Initial,
+ created_at: Timestamp::new(1234),
+ column_set: ColumnSet::new([1, 2, 3, 4].into_iter().map(ColumnId::new)),
+ max_l0_created_at: Timestamp::new(42),
+ };
+
+ decorator
+ .persist_complete(Arc::new(CompletedPersist::new(
+ meta.clone(),
+ SequenceNumberSet::default(),
+ )))
+ .await;
+
+ assert_histogram!(
+ metrics,
+ DurationHistogram,
+ "ingester_persist_parquet_file_time_range",
+ samples = 1,
+ sum = Duration::from_secs(42),
+ );
+
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_persist_parquet_file_size_bytes",
+ samples = 1,
+ sum = meta.file_size_bytes as u64,
+ );
+
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_persist_parquet_file_row_count",
+ samples = 1,
+ sum = meta.row_count as u64,
+ );
+
+ assert_histogram!(
+ metrics,
+ U64Histogram,
+ "ingester_persist_parquet_file_column_count",
+ samples = 1,
+ sum = meta.column_set.len() as u64,
+ );
+ }
+}
diff --git a/ingester/src/persist/mod.rs b/ingester/src/persist/mod.rs
index 835964ea77..f1456ace3e 100644
--- a/ingester/src/persist/mod.rs
+++ b/ingester/src/persist/mod.rs
@@ -5,6 +5,7 @@ pub(super) mod compact;
pub(crate) mod completion_observer;
mod context;
pub(crate) mod drain_buffer;
+pub(crate) mod file_metrics;
pub(crate) mod handle;
pub(crate) mod hot_partitions;
pub mod queue;
|
adddd71c4d2be39345d7e42368c476a308244593
|
Andrew Lamb
|
2023-02-13 19:54:57
|
Add some comments about fatal panic rationale (#6955)
|
* chore: Add some comments and log messages about fatal panics
* fix: Update panic_logging/src/lib.rs
---------
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
chore: Add some comments about fatal panic rationale (#6955)
* chore: Add some comments and log messages about fatal panics
* fix: Update panic_logging/src/lib.rs
---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
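The diff below only adds comments around the existing `make_panics_fatal()` call. As a rough sketch of what "fatal panics" means here (an illustration only, not the panic_logging implementation), a process-wide panic hook can chain onto the existing hook and then abort:
use std::{panic, process};
// Hypothetical sketch of a fatal-panic hook: keep whatever logging the
// previous hook performed, then abort the whole process so a panic in any
// thread or tokio task cannot leave the server running with corrupted state.
fn install_fatal_panic_hook() {
    let previous = panic::take_hook();
    panic::set_hook(Box::new(move |info| {
        previous(info);
        process::abort();
    }));
}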
|
diff --git a/influxdb_iox/src/commands/run/ingester2.rs b/influxdb_iox/src/commands/run/ingester2.rs
index 632be386f0..6e6105669d 100644
--- a/influxdb_iox/src/commands/run/ingester2.rs
+++ b/influxdb_iox/src/commands/run/ingester2.rs
@@ -93,7 +93,10 @@ pub async fn command(config: Config) -> Result<()> {
);
}
- // Ensure panics are fatal when running in this server mode.
+ // Ensure panics (even in threads or tokio tasks) are fatal when
+ // running in this server mode. This is done to avoid potential
+ // data corruption because there is no foolproof way to recover
+ // state after a panic.
make_panics_fatal();
let common_state = CommonServerState::from_config(config.run_config.clone())?;
diff --git a/influxdb_iox/src/commands/run/router2.rs b/influxdb_iox/src/commands/run/router2.rs
index 61cb269505..e1682955ab 100644
--- a/influxdb_iox/src/commands/run/router2.rs
+++ b/influxdb_iox/src/commands/run/router2.rs
@@ -72,7 +72,10 @@ pub async fn command(config: Config) -> Result<()> {
);
}
- // Ensure panics are fatal when running in this server mode.
+ // Ensure panics (even in threads or tokio tasks) are fatal when
+ // running in this server mode. This is done to avoid potential
+ // data corruption because there is no foolproof way to recover
+ // state after a panic.
make_panics_fatal();
let common_state = CommonServerState::from_config(config.run_config.clone())?;
|
230bf02f93d1ecb3cdf8122416fb4fbe93dba6ab
|
Michael Gattozzi
|
2024-11-19 12:42:20
|
delete old Catalogs on persist (#25568)
|
This commit changes the code so that we only keep the 10 most recent
Catalogs. When a new one is persisted we delete any old ones that
exist. If the deletion would fail we don't panic and let a future
persist clean up the catalogs rather than failing the persist itself.
This commit also adds a test to make sure that only the catalogs we
expect to are deleted on persist.
| null |
feat: delete old Catalogs on persist (#25568)
This commit changes the code so that we only keep the 10 most recent
Catalogs. When a new one is persisted we delete any old ones that
exist. If the deletion would fail we don't panic and let a future
persist clean up the catalogs rather than failing the persist itself.
This commit also adds a test to make sure that only the catalogs we
expect to are deleted on persist.
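The cleanup boils down to a simple retention rule. A condensed sketch (the helper name is illustrative, not part of the change): catalog objects are written as `<host>/catalogs/<u64::MAX - sequence>.json`, so an ascending sort by path puts the newest catalogs first and everything past the first ten can be deleted.
// Illustrative helper: given the listed catalog paths, return the ones that
// are old enough to be removed.
fn catalogs_to_delete(mut paths: Vec<String>) -> Vec<String> {
    // Newer catalogs have smaller `u64::MAX - sequence` file names, so an
    // ascending sort places the most recent catalogs first.
    paths.sort();
    // Keep the ten newest catalogs; everything after them is stale.
    paths.into_iter().skip(10).collect()
}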
|
diff --git a/influxdb3_write/src/persister.rs b/influxdb3_write/src/persister.rs
index dc714a60d8..8cf2c16ab4 100644
--- a/influxdb3_write/src/persister.rs
+++ b/influxdb3_write/src/persister.rs
@@ -263,6 +263,31 @@ impl Persister {
self.object_store
.put(catalog_path.as_ref(), json.into())
.await?;
+ // It's okay if this fails as it's just cleanup of the old catalog
+ // a new persist will come in and clean the old one up.
+ let prefix = self.host_identifier_prefix.clone();
+ let obj_store = Arc::clone(&self.object_store);
+ tokio::spawn(async move {
+ let mut items = Vec::new();
+ let mut stream = obj_store.list(Some(&format!("{}/catalogs", prefix).into()));
+ while let Some(item) = stream.next().await {
+ match item {
+ Ok(item) => items.push(item),
+ Err(_) => return,
+ }
+ }
+
+ // We want to sort by the newest paths first
+ items.sort_by(|a, b| a.location.cmp(&b.location));
+
+ // We skip over the ten most recent catalogs and delete any leftovers
+ // In most cases this will just be one of them.
+ for item in items.iter().skip(10) {
+ if obj_store.delete(&item.location).await.is_err() {
+ return;
+ }
+ }
+ });
Ok(())
}
@@ -399,10 +424,14 @@ mod tests {
use crate::{DatabaseTables, ParquetFile, ParquetFileId};
use influxdb3_catalog::catalog::CatalogSequenceNumber;
use influxdb3_id::{ColumnId, DbId, SerdeVecMap, TableId};
- use influxdb3_wal::{SnapshotSequenceNumber, WalFileSequenceNumber};
+ use influxdb3_wal::{
+ CatalogBatch, CatalogOp, FieldDataType, FieldDefinition, SnapshotSequenceNumber,
+ TableDefinition, WalFileSequenceNumber,
+ };
use object_store::memory::InMemory;
use observability_deps::tracing::info;
use pretty_assertions::assert_eq;
+ use tokio::time::{sleep, Duration};
use {
arrow::array::Int32Array, arrow::datatypes::DataType, arrow::datatypes::Field,
arrow::datatypes::Schema, chrono::Utc,
@@ -423,6 +452,124 @@ mod tests {
persister.persist_catalog(&catalog).await.unwrap();
}
+ #[tokio::test]
+ async fn persist_catalog_with_cleanup() {
+ let host_id = Arc::from("sample-host-id");
+ let instance_id = Arc::from("sample-instance-id");
+ let prefix = test_helpers::tmp_dir().unwrap();
+ let local_disk = LocalFileSystem::new_with_prefix(prefix).unwrap();
+ let obj_store: Arc<dyn ObjectStore> = Arc::new(local_disk);
+ let persister = Persister::new(Arc::clone(&obj_store), "test_host");
+ let catalog = Catalog::new(Arc::clone(&host_id), instance_id);
+ persister.persist_catalog(&catalog).await.unwrap();
+ let db_schema = catalog.db_or_create("my_db_1").unwrap();
+ persister.persist_catalog(&catalog).await.unwrap();
+ let _ = catalog.db_or_create("my_db_2").unwrap();
+ persister.persist_catalog(&catalog).await.unwrap();
+ let _ = catalog.db_or_create("my_db_3").unwrap();
+ persister.persist_catalog(&catalog).await.unwrap();
+ let _ = catalog.db_or_create("my_db_4").unwrap();
+ persister.persist_catalog(&catalog).await.unwrap();
+ let _ = catalog.db_or_create("my_db_5").unwrap();
+ persister.persist_catalog(&catalog).await.unwrap();
+
+ let batch = |name: &str, num: u32| {
+ let _ = catalog.apply_catalog_batch(&CatalogBatch {
+ database_id: db_schema.id,
+ database_name: Arc::clone(&db_schema.name),
+ time_ns: 5000,
+ ops: vec![CatalogOp::CreateTable(TableDefinition {
+ database_id: db_schema.id,
+ database_name: Arc::clone(&db_schema.name),
+ table_name: name.into(),
+ table_id: TableId::from(num),
+ field_definitions: vec![FieldDefinition {
+ name: "column".into(),
+ id: ColumnId::from(num),
+ data_type: FieldDataType::String,
+ }],
+ key: None,
+ })],
+ });
+ };
+
+ batch("table_zero", 0);
+ persister.persist_catalog(&catalog).await.unwrap();
+ batch("table_one", 1);
+ persister.persist_catalog(&catalog).await.unwrap();
+ batch("table_two", 2);
+ persister.persist_catalog(&catalog).await.unwrap();
+ batch("table_three", 3);
+ persister.persist_catalog(&catalog).await.unwrap();
+
+ // We've persisted the catalog 10 times and nothing has changed
+ // So now we need to persist the catalog two more times and we should
+ // see the first 2 catalogs be dropped.
+ batch("table_four", 4);
+ persister.persist_catalog(&catalog).await.unwrap();
+ batch("table_five", 5);
+ persister.persist_catalog(&catalog).await.unwrap();
+
+ // Make sure the deletions have all occurred
+ sleep(Duration::from_secs(2)).await;
+
+ let mut stream = obj_store.list(None);
+ let mut items = Vec::new();
+ while let Some(item) = stream.next().await {
+ items.push(item.unwrap());
+ }
+
+ // Sort by oldest first
+ items.sort_by(|a, b| b.location.cmp(&a.location));
+
+ assert_eq!(items.len(), 10);
+ // The first path should contain this number meaning we've
+ // eliminated the first two items
+ assert_eq!(18446744073709551613, u64::MAX - 2);
+
+ // Assert that we have 10 catalogs of decreasing number
+ assert_eq!(
+ items[0].location,
+ "test_host/catalogs/18446744073709551613.json".into()
+ );
+ assert_eq!(
+ items[1].location,
+ "test_host/catalogs/18446744073709551612.json".into()
+ );
+ assert_eq!(
+ items[2].location,
+ "test_host/catalogs/18446744073709551611.json".into()
+ );
+ assert_eq!(
+ items[3].location,
+ "test_host/catalogs/18446744073709551610.json".into()
+ );
+ assert_eq!(
+ items[4].location,
+ "test_host/catalogs/18446744073709551609.json".into()
+ );
+ assert_eq!(
+ items[5].location,
+ "test_host/catalogs/18446744073709551608.json".into()
+ );
+ assert_eq!(
+ items[6].location,
+ "test_host/catalogs/18446744073709551607.json".into()
+ );
+ assert_eq!(
+ items[7].location,
+ "test_host/catalogs/18446744073709551606.json".into()
+ );
+ assert_eq!(
+ items[8].location,
+ "test_host/catalogs/18446744073709551605.json".into()
+ );
+ assert_eq!(
+ items[9].location,
+ "test_host/catalogs/18446744073709551604.json".into()
+ );
+ }
+
#[tokio::test]
async fn persist_and_load_newest_catalog() {
let host_id: Arc<str> = Arc::from("sample-host-id");
|
a2521bbf35af7eee44fa502d6ff59b0ae8967f50
|
Stuart Carnie
|
2023-06-20 16:37:28
|
moving_average, difference and non_negative_difference
|
There is a `todo` regarding `update_batch` to be discussed with @alamb
| null |
feat: moving_average, difference and non_negative_difference
There is a `todo` regarding `update_batch` to be discussed with @alamb
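The diff only exercises these functions through InfluxQL test cases. As a conceptual reference (not the IOx implementation, and ignoring fill/NULL subtleties), the three projections compute roughly the following over a series of optional values:
// difference: each value minus the previous non-null value.
fn difference(input: &[Option<f64>]) -> Vec<f64> {
    let vals: Vec<f64> = input.iter().copied().flatten().collect();
    vals.windows(2).map(|w| w[1] - w[0]).collect()
}
// non_negative_difference: as above, but negative results are dropped.
fn non_negative_difference(input: &[Option<f64>]) -> Vec<f64> {
    difference(input).into_iter().filter(|d| *d >= 0.0).collect()
}
// moving_average: mean of the current value and the n - 1 preceding values,
// emitted once n non-null values have been seen (n must be at least 1).
fn moving_average(input: &[Option<f64>], n: usize) -> Vec<f64> {
    let vals: Vec<f64> = input.iter().copied().flatten().collect();
    vals.windows(n)
        .map(|w| w.iter().sum::<f64>() / n as f64)
        .collect()
}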
|
diff --git a/influxdb_iox/tests/query_tests/cases.rs b/influxdb_iox/tests/query_tests/cases.rs
index bee8ca4695..486dcf0602 100644
--- a/influxdb_iox/tests/query_tests/cases.rs
+++ b/influxdb_iox/tests/query_tests/cases.rs
@@ -400,6 +400,8 @@ mod influxql {
.await;
}
+ /// Test window-like functions, which utilise user-defined aggregate and
+ /// window functions.
#[tokio::test]
async fn window_like() {
test_helpers::maybe_start_logging();
diff --git a/influxdb_iox/tests/query_tests/cases/in/window_like.influxql b/influxdb_iox/tests/query_tests/cases/in/window_like.influxql
index 55d54bc336..cb3400ed0c 100644
--- a/influxdb_iox/tests/query_tests/cases/in/window_like.influxql
+++ b/influxdb_iox/tests/query_tests/cases/in/window_like.influxql
@@ -4,17 +4,69 @@
--
-- difference
--
-SELECT difference(mean(writes)), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s);
-SELECT difference(mean(writes)), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(none);
-SELECT difference(mean(writes)), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(0);
-SELECT difference(mean(writes)), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(previous);
-SELECT difference(mean(writes)), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(linear);
+SELECT difference(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z';
+-- group by a tag
+SELECT difference(usage_idle) FROM cpu WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' AND cpu =~ /^cpu(0|1)$/ GROUP BY cpu;
--
--- moving_average
+-- difference + aggregate
--
-SELECT moving_average(mean(writes), 3), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s);
-SELECT moving_average(mean(writes), 3), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(none);
-SELECT moving_average(mean(writes), 3), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(0);
-SELECT moving_average(mean(writes), 3), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(previous);
-SELECT moving_average(mean(writes), 3), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(linear);
\ No newline at end of file
+SELECT difference(mean(writes)) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s);
+SELECT difference(mean(writes)) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(30s);
+-- the input data is regular data at 10s intervals, so 7s windows ensure the `mean` generates windows with NULL values to test NULL handling of difference
+SELECT difference(mean(writes)) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(0);
+SELECT difference(mean(writes)) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(previous);
+SELECT difference(mean(writes)) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(linear);
+-- group by time and a tag
+SELECT difference(mean(usage_idle)) FROM cpu WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' AND cpu =~ /^cpu(0|1)$/ GROUP BY TIME(30s), cpu;
+
+
+--
+-- non_negative_difference
+--
+SELECT non_negative_difference(usage_idle) FROM cpu WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' AND cpu = 'cpu0';
+SELECT non_negative_difference(usage_idle) FROM cpu WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' AND cpu =~ /^cpu(0|1)$/ GROUP BY cpu;
+
+--
+-- non_negative_difference + aggregate
+--
+SELECT non_negative_difference(mean(usage_idle)) FROM cpu WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' AND cpu =~ /^cpu(0|1)$/ GROUP BY TIME(30s), cpu;
+
+--
+-- moving_average + aggregate
+--
+-- the input data is regular data at 10s intervals, so 7s windows ensure the `mean` generates windows with NULL values to test NULL handling of moving_average
+SELECT moving_average(mean(writes), 3) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s);
+SELECT moving_average(mean(writes), 3) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(0);
+SELECT moving_average(mean(writes), 3) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(previous);
+SELECT moving_average(mean(writes), 3) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(linear);
+
+--
+-- combining window functions
+--
+SELECT difference(usage_idle), non_negative_difference(usage_idle), moving_average(usage_idle, 4) FROM cpu WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' AND cpu =~ /^cpu(0|1)$/ GROUP BY cpu;
+-- aggregate + window
+SELECT difference(mean(usage_idle)), non_negative_difference(mean(usage_idle)), moving_average(mean(usage_idle), 4) FROM cpu WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' AND cpu =~ /^cpu(0|1)$/ GROUP BY TIME(30s), cpu;
+
+--
+-- The following queries project a mix of window + aggregate and plain aggregate functions,
+-- which appears to be uncommon. They currently produce additional rows that do not match
+-- InfluxQL OG and will require further investigation.
+
+--
+-- difference with mixed aggregate
+--
+-- SELECT difference(mean(writes)), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s);
+-- SELECT difference(mean(writes)), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(none);
+-- SELECT difference(mean(writes)), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(0);
+-- SELECT difference(mean(writes)), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(previous);
+-- SELECT difference(mean(writes)), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(linear);
+
+--
+-- moving_average with mixed aggregate
+--
+-- SELECT moving_average(mean(writes), 3), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s);
+-- SELECT moving_average(mean(writes), 3), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(none);
+-- SELECT moving_average(mean(writes), 3), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(0);
+-- SELECT moving_average(mean(writes), 3), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(previous);
+-- SELECT moving_average(mean(writes), 3), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(linear);
\ No newline at end of file
diff --git a/influxdb_iox/tests/query_tests/cases/in/window_like.influxql.expected b/influxdb_iox/tests/query_tests/cases/in/window_like.influxql.expected
index c776ccc52c..77c6038cd1 100644
--- a/influxdb_iox/tests/query_tests/cases/in/window_like.influxql.expected
+++ b/influxdb_iox/tests/query_tests/cases/in/window_like.influxql.expected
@@ -1,151 +1,314 @@
-- Test Setup: window_like
--- InfluxQL: SELECT difference(mean(writes)), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s);
+-- InfluxQL: SELECT difference(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z';
name: diskio
-+---------------------+------------+-----------+
-| time | difference | mean |
-+---------------------+------------+-----------+
-| 2020-06-11T16:52:54 | | 5592646.0 |
-| 2020-06-11T16:53:01 | | |
-| 2020-06-11T16:53:08 | 164.0 | 5592810.0 |
-| 2020-06-11T16:53:15 | 187.0 | 5592997.0 |
-| 2020-06-11T16:53:22 | | |
-| 2020-06-11T16:53:29 | 112.0 | 5593109.0 |
-| 2020-06-11T16:53:36 | 110.0 | 5593219.0 |
-| 2020-06-11T16:53:43 | | |
-| 2020-06-11T16:53:50 | 219.0 | 5593438.0 |
-| 2020-06-11T16:53:57 | 75.0 | 5593513.0 |
-| 2020-06-11T16:54:04 | 76.0 | 5593589.0 |
-| 2020-06-11T16:54:11 | | |
-| 2020-06-11T16:54:18 | 146.0 | 5593735.0 |
-| 2020-06-11T16:54:25 | | |
-| 2020-06-11T16:54:32 | | |
-| 2020-06-11T16:54:39 | | |
-| 2020-06-11T16:54:46 | | |
-| 2020-06-11T16:54:53 | | |
-+---------------------+------------+-----------+
--- InfluxQL: SELECT difference(mean(writes)), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(none);
++---------------------+------------+
+| time | difference |
++---------------------+------------+
+| 2020-06-11T16:53:10 | 164 |
+| 2020-06-11T16:53:20 | 187 |
+| 2020-06-11T16:53:30 | 112 |
+| 2020-06-11T16:53:40 | 110 |
+| 2020-06-11T16:53:50 | 219 |
+| 2020-06-11T16:54:00 | 75 |
+| 2020-06-11T16:54:10 | 76 |
+| 2020-06-11T16:54:20 | 146 |
++---------------------+------------+
+-- InfluxQL: SELECT difference(usage_idle) FROM cpu WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' AND cpu =~ /^cpu(0|1)$/ GROUP BY cpu;
+name: cpu
+tags: cpu=cpu0
++---------------------+----------------------+
+| time | difference |
++---------------------+----------------------+
+| 2020-06-11T16:53:10 | 2.976802976802972 |
+| 2020-06-11T16:53:20 | -0.878900878900879 |
+| 2020-06-11T16:53:30 | 1.3890109890109983 |
+| 2020-06-11T16:53:40 | -0.10970970970971905 |
+| 2020-06-11T16:53:50 | -0.4902902902902895 |
+| 2020-06-11T16:54:00 | 0.29009009009008935 |
+| 2020-06-11T16:54:10 | -1.2677348006689328 |
++---------------------+----------------------+
+name: cpu
+tags: cpu=cpu1
++---------------------+------------------------+
+| time | difference |
++---------------------+------------------------+
+| 2020-06-11T16:53:10 | -0.09980019980019961 |
+| 2020-06-11T16:53:20 | 0.09980019980019961 |
+| 2020-06-11T16:53:30 | 0.0 |
+| 2020-06-11T16:53:40 | -0.0001001001001128543 |
+| 2020-06-11T16:53:50 | -0.09970009970008675 |
+| 2020-06-11T16:54:00 | 0.09959979899859661 |
+| 2020-06-11T16:54:10 | -0.09959979899859661 |
++---------------------+------------------------+
+-- InfluxQL: SELECT difference(mean(writes)) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s);
name: diskio
-+---------------------+------------+-----------+
-| time | difference | mean |
-+---------------------+------------+-----------+
-| 2020-06-11T16:52:54 | | 5592646.0 |
-| 2020-06-11T16:53:08 | 164.0 | 5592810.0 |
-| 2020-06-11T16:53:15 | 187.0 | 5592997.0 |
-| 2020-06-11T16:53:29 | 112.0 | 5593109.0 |
-| 2020-06-11T16:53:36 | 110.0 | 5593219.0 |
-| 2020-06-11T16:53:50 | 219.0 | 5593438.0 |
-| 2020-06-11T16:53:57 | 75.0 | 5593513.0 |
-| 2020-06-11T16:54:04 | 76.0 | 5593589.0 |
-| 2020-06-11T16:54:18 | 146.0 | 5593735.0 |
-+---------------------+------------+-----------+
--- InfluxQL: SELECT difference(mean(writes)), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(previous);
++---------------------+------------+
+| time | difference |
++---------------------+------------+
+| 2020-06-11T16:53:08 | 164.0 |
+| 2020-06-11T16:53:15 | 187.0 |
+| 2020-06-11T16:53:29 | 112.0 |
+| 2020-06-11T16:53:36 | 110.0 |
+| 2020-06-11T16:53:50 | 219.0 |
+| 2020-06-11T16:53:57 | 75.0 |
+| 2020-06-11T16:54:04 | 76.0 |
+| 2020-06-11T16:54:18 | 146.0 |
++---------------------+------------+
+-- InfluxQL: SELECT difference(mean(writes)) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(30s);
name: diskio
-+---------------------+------------+-----------+
-| time | difference | mean |
-+---------------------+------------+-----------+
-| 2020-06-11T16:52:54 | | 5592646.0 |
-| 2020-06-11T16:53:01 | 0.0 | 5592646.0 |
-| 2020-06-11T16:53:08 | 164.0 | 5592810.0 |
-| 2020-06-11T16:53:15 | 187.0 | 5592997.0 |
-| 2020-06-11T16:53:22 | 0.0 | 5592997.0 |
-| 2020-06-11T16:53:29 | 112.0 | 5593109.0 |
-| 2020-06-11T16:53:36 | 110.0 | 5593219.0 |
-| 2020-06-11T16:53:43 | 0.0 | 5593219.0 |
-| 2020-06-11T16:53:50 | 219.0 | 5593438.0 |
-| 2020-06-11T16:53:57 | 75.0 | 5593513.0 |
-| 2020-06-11T16:54:04 | 76.0 | 5593589.0 |
-| 2020-06-11T16:54:11 | 0.0 | 5593589.0 |
-| 2020-06-11T16:54:18 | 146.0 | 5593735.0 |
-| 2020-06-11T16:54:25 | 0.0 | 5593735.0 |
-| 2020-06-11T16:54:32 | 0.0 | 5593735.0 |
-| 2020-06-11T16:54:39 | 0.0 | 5593735.0 |
-| 2020-06-11T16:54:46 | 0.0 | 5593735.0 |
-| 2020-06-11T16:54:53 | 0.0 | 5593735.0 |
-+---------------------+------------+-----------+
--- InfluxQL: SELECT difference(mean(writes)), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(linear);
++---------------------+-------------------+
+| time | difference |
++---------------------+-------------------+
+| 2020-06-11T16:53:00 | 433.6666666669771 |
+| 2020-06-11T16:53:30 | 437.6666666660458 |
+| 2020-06-11T16:54:00 | 357.0 |
++---------------------+-------------------+
+-- InfluxQL: SELECT difference(mean(writes)) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(0);
name: diskio
-+---------------------+------------+-----------+
-| time | difference | mean |
-+---------------------+------------+-----------+
-| 2020-06-11T16:52:54 | | 5592646.0 |
-| 2020-06-11T16:53:01 | 82.0 | 5592728.0 |
-| 2020-06-11T16:53:08 | 82.0 | 5592810.0 |
-| 2020-06-11T16:53:15 | 187.0 | 5592997.0 |
-| 2020-06-11T16:53:22 | 56.0 | 5593053.0 |
-| 2020-06-11T16:53:29 | 56.0 | 5593109.0 |
-| 2020-06-11T16:53:36 | 110.0 | 5593219.0 |
-| 2020-06-11T16:53:43 | 109.5 | 5593328.5 |
-| 2020-06-11T16:53:50 | 109.5 | 5593438.0 |
-| 2020-06-11T16:53:57 | 75.0 | 5593513.0 |
-| 2020-06-11T16:54:04 | 76.0 | 5593589.0 |
-| 2020-06-11T16:54:11 | 73.0 | 5593662.0 |
-| 2020-06-11T16:54:18 | 73.0 | 5593735.0 |
-| 2020-06-11T16:54:25 | | |
-| 2020-06-11T16:54:32 | | |
-| 2020-06-11T16:54:39 | | |
-| 2020-06-11T16:54:46 | | |
-| 2020-06-11T16:54:53 | | |
-+---------------------+------------+-----------+
--- InfluxQL: SELECT moving_average(mean(writes), 3), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(none);
++---------------------+------------+
+| time | difference |
++---------------------+------------+
+| 2020-06-11T16:52:54 | 5592646.0 |
+| 2020-06-11T16:53:01 | -5592646.0 |
+| 2020-06-11T16:53:08 | 5592810.0 |
+| 2020-06-11T16:53:15 | 187.0 |
+| 2020-06-11T16:53:22 | -5592997.0 |
+| 2020-06-11T16:53:29 | 5593109.0 |
+| 2020-06-11T16:53:36 | 110.0 |
+| 2020-06-11T16:53:43 | -5593219.0 |
+| 2020-06-11T16:53:50 | 5593438.0 |
+| 2020-06-11T16:53:57 | 75.0 |
+| 2020-06-11T16:54:04 | 76.0 |
+| 2020-06-11T16:54:11 | -5593589.0 |
+| 2020-06-11T16:54:18 | 5593735.0 |
+| 2020-06-11T16:54:25 | -5593735.0 |
+| 2020-06-11T16:54:32 | 0.0 |
+| 2020-06-11T16:54:39 | 0.0 |
+| 2020-06-11T16:54:46 | 0.0 |
+| 2020-06-11T16:54:53 | 0.0 |
++---------------------+------------+
+-- InfluxQL: SELECT difference(mean(writes)) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(previous);
name: diskio
-+---------------------+-------------------+-----------+
-| time | moving_average | mean |
-+---------------------+-------------------+-----------+
-| 2020-06-11T16:52:54 | | 5592646.0 |
-| 2020-06-11T16:53:08 | | 5592810.0 |
-| 2020-06-11T16:53:15 | 5592817.666666667 | 5592997.0 |
-| 2020-06-11T16:53:29 | 5592972.0 | 5593109.0 |
-| 2020-06-11T16:53:36 | 5593108.333333333 | 5593219.0 |
-| 2020-06-11T16:53:50 | 5593255.333333333 | 5593438.0 |
-| 2020-06-11T16:53:57 | 5593390.0 | 5593513.0 |
-| 2020-06-11T16:54:04 | 5593513.333333333 | 5593589.0 |
-| 2020-06-11T16:54:18 | 5593612.333333333 | 5593735.0 |
-+---------------------+-------------------+-----------+
--- InfluxQL: SELECT moving_average(mean(writes), 3), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(previous);
++---------------------+------------+
+| time | difference |
++---------------------+------------+
+| 2020-06-11T16:53:01 | 0.0 |
+| 2020-06-11T16:53:08 | 164.0 |
+| 2020-06-11T16:53:15 | 187.0 |
+| 2020-06-11T16:53:22 | 0.0 |
+| 2020-06-11T16:53:29 | 112.0 |
+| 2020-06-11T16:53:36 | 110.0 |
+| 2020-06-11T16:53:43 | 0.0 |
+| 2020-06-11T16:53:50 | 219.0 |
+| 2020-06-11T16:53:57 | 75.0 |
+| 2020-06-11T16:54:04 | 76.0 |
+| 2020-06-11T16:54:11 | 0.0 |
+| 2020-06-11T16:54:18 | 146.0 |
+| 2020-06-11T16:54:25 | 0.0 |
+| 2020-06-11T16:54:32 | 0.0 |
+| 2020-06-11T16:54:39 | 0.0 |
+| 2020-06-11T16:54:46 | 0.0 |
+| 2020-06-11T16:54:53 | 0.0 |
++---------------------+------------+
+-- InfluxQL: SELECT difference(mean(writes)) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(linear);
name: diskio
-+---------------------+-------------------+-----------+
-| time | moving_average | mean |
-+---------------------+-------------------+-----------+
-| 2020-06-11T16:52:54 | | 5592646.0 |
-| 2020-06-11T16:53:01 | | 5592646.0 |
-| 2020-06-11T16:53:08 | 5592700.666666667 | 5592810.0 |
-| 2020-06-11T16:53:15 | 5592817.666666667 | 5592997.0 |
-| 2020-06-11T16:53:22 | 5592934.666666667 | 5592997.0 |
-| 2020-06-11T16:53:29 | 5593034.333333333 | 5593109.0 |
-| 2020-06-11T16:53:36 | 5593108.333333333 | 5593219.0 |
-| 2020-06-11T16:53:43 | 5593182.333333333 | 5593219.0 |
-| 2020-06-11T16:53:50 | 5593292.0 | 5593438.0 |
-| 2020-06-11T16:53:57 | 5593390.0 | 5593513.0 |
-| 2020-06-11T16:54:04 | 5593513.333333333 | 5593589.0 |
-| 2020-06-11T16:54:11 | 5593563.666666667 | 5593589.0 |
-| 2020-06-11T16:54:18 | 5593637.666666667 | 5593735.0 |
-| 2020-06-11T16:54:25 | 5593686.333333333 | 5593735.0 |
-| 2020-06-11T16:54:32 | 5593735.0 | 5593735.0 |
-| 2020-06-11T16:54:39 | 5593735.0 | 5593735.0 |
-| 2020-06-11T16:54:46 | 5593735.0 | 5593735.0 |
-| 2020-06-11T16:54:53 | 5593735.0 | 5593735.0 |
-+---------------------+-------------------+-----------+
--- InfluxQL: SELECT moving_average(mean(writes), 3), mean(writes) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(linear);
++---------------------+------------+
+| time | difference |
++---------------------+------------+
+| 2020-06-11T16:53:01 | 82.0 |
+| 2020-06-11T16:53:08 | 82.0 |
+| 2020-06-11T16:53:15 | 187.0 |
+| 2020-06-11T16:53:22 | 56.0 |
+| 2020-06-11T16:53:29 | 56.0 |
+| 2020-06-11T16:53:36 | 110.0 |
+| 2020-06-11T16:53:43 | 109.5 |
+| 2020-06-11T16:53:50 | 109.5 |
+| 2020-06-11T16:53:57 | 75.0 |
+| 2020-06-11T16:54:04 | 76.0 |
+| 2020-06-11T16:54:11 | 73.0 |
+| 2020-06-11T16:54:18 | 73.0 |
++---------------------+------------+
+-- InfluxQL: SELECT difference(mean(usage_idle)) FROM cpu WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' AND cpu =~ /^cpu(0|1)$/ GROUP BY TIME(30s), cpu;
+name: cpu
+tags: cpu=cpu0
++---------------------+---------------------+
+| time | difference |
++---------------------+---------------------+
+| 2020-06-11T16:53:00 | 1.9489658560635377 |
+| 2020-06-11T16:53:30 | 1.5587748254415033 |
+| 2020-06-11T16:54:00 | -0.7072074070078145 |
++---------------------+---------------------+
+name: cpu
+tags: cpu=cpu1
++---------------------+------------------------+
+| time | difference |
++---------------------+------------------------+
+| 2020-06-11T16:53:00 | 0.03313852030396447 |
+| 2020-06-11T16:53:30 | -0.0000333667000376181 |
+| 2020-06-11T16:54:00 | -0.0167002003341139 |
++---------------------+------------------------+
+-- InfluxQL: SELECT non_negative_difference(usage_idle) FROM cpu WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' AND cpu = 'cpu0';
+name: cpu
++---------------------+-------------------------+
+| time | non_negative_difference |
++---------------------+-------------------------+
+| 2020-06-11T16:53:10 | 2.976802976802972 |
+| 2020-06-11T16:53:30 | 1.3890109890109983 |
+| 2020-06-11T16:54:00 | 0.29009009009008935 |
++---------------------+-------------------------+
+-- InfluxQL: SELECT non_negative_difference(usage_idle) FROM cpu WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' AND cpu =~ /^cpu(0|1)$/ GROUP BY cpu;
+name: cpu
+tags: cpu=cpu0
++---------------------+-------------------------+
+| time | non_negative_difference |
++---------------------+-------------------------+
+| 2020-06-11T16:53:10 | 2.976802976802972 |
+| 2020-06-11T16:53:30 | 1.3890109890109983 |
+| 2020-06-11T16:54:00 | 0.29009009009008935 |
++---------------------+-------------------------+
+name: cpu
+tags: cpu=cpu1
++---------------------+-------------------------+
+| time | non_negative_difference |
++---------------------+-------------------------+
+| 2020-06-11T16:53:20 | 0.09980019980019961 |
+| 2020-06-11T16:53:30 | 0.0 |
+| 2020-06-11T16:54:00 | 0.09959979899859661 |
++---------------------+-------------------------+
+-- InfluxQL: SELECT non_negative_difference(mean(usage_idle)) FROM cpu WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' AND cpu =~ /^cpu(0|1)$/ GROUP BY TIME(30s), cpu;
+name: cpu
+tags: cpu=cpu0
++---------------------+-------------------------+
+| time | non_negative_difference |
++---------------------+-------------------------+
+| 2020-06-11T16:53:00 | 1.9489658560635377 |
+| 2020-06-11T16:53:30 | 1.5587748254415033 |
++---------------------+-------------------------+
+name: cpu
+tags: cpu=cpu1
++---------------------+-------------------------+
+| time | non_negative_difference |
++---------------------+-------------------------+
+| 2020-06-11T16:53:00 | 0.03313852030396447 |
++---------------------+-------------------------+
+-- InfluxQL: SELECT moving_average(mean(writes), 3) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s);
name: diskio
-+---------------------+-------------------+-----------+
-| time | moving_average | mean |
-+---------------------+-------------------+-----------+
-| 2020-06-11T16:52:54 | | 5592646.0 |
-| 2020-06-11T16:53:01 | | 5592728.0 |
-| 2020-06-11T16:53:08 | 5592728.0 | 5592810.0 |
-| 2020-06-11T16:53:15 | 5592845.0 | 5592997.0 |
-| 2020-06-11T16:53:22 | 5592953.333333333 | 5593053.0 |
-| 2020-06-11T16:53:29 | 5593053.0 | 5593109.0 |
-| 2020-06-11T16:53:36 | 5593127.0 | 5593219.0 |
-| 2020-06-11T16:53:43 | 5593218.833333333 | 5593328.5 |
-| 2020-06-11T16:53:50 | 5593328.5 | 5593438.0 |
-| 2020-06-11T16:53:57 | 5593426.5 | 5593513.0 |
-| 2020-06-11T16:54:04 | 5593513.333333333 | 5593589.0 |
-| 2020-06-11T16:54:11 | 5593588.0 | 5593662.0 |
-| 2020-06-11T16:54:18 | 5593662.0 | 5593735.0 |
-| 2020-06-11T16:54:25 | | |
-| 2020-06-11T16:54:32 | | |
-| 2020-06-11T16:54:39 | | |
-| 2020-06-11T16:54:46 | | |
-| 2020-06-11T16:54:53 | | |
-+---------------------+-------------------+-----------+
\ No newline at end of file
++---------------------+-------------------+
+| time | moving_average |
++---------------------+-------------------+
+| 2020-06-11T16:53:15 | 5592817.666666667 |
+| 2020-06-11T16:53:29 | 5592972.0 |
+| 2020-06-11T16:53:36 | 5593108.333333333 |
+| 2020-06-11T16:53:50 | 5593255.333333333 |
+| 2020-06-11T16:53:57 | 5593390.0 |
+| 2020-06-11T16:54:04 | 5593513.333333333 |
+| 2020-06-11T16:54:18 | 5593612.333333333 |
++---------------------+-------------------+
+-- InfluxQL: SELECT moving_average(mean(writes), 3) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(0);
+name: diskio
++---------------------+--------------------+
+| time | moving_average |
++---------------------+--------------------+
+| 2020-06-11T16:53:01 | 1864215.3333333333 |
+| 2020-06-11T16:53:08 | 3728485.3333333335 |
+| 2020-06-11T16:53:15 | 3728602.3333333335 |
+| 2020-06-11T16:53:22 | 3728602.3333333335 |
+| 2020-06-11T16:53:29 | 3728702.0 |
+| 2020-06-11T16:53:36 | 3728776.0 |
+| 2020-06-11T16:53:43 | 3728776.0 |
+| 2020-06-11T16:53:50 | 3728885.6666666665 |
+| 2020-06-11T16:53:57 | 3728983.6666666665 |
+| 2020-06-11T16:54:04 | 5593513.333333333 |
+| 2020-06-11T16:54:11 | 3729034.0 |
+| 2020-06-11T16:54:18 | 3729108.0 |
+| 2020-06-11T16:54:25 | 1864578.3333333333 |
+| 2020-06-11T16:54:32 | 1864578.3333333333 |
+| 2020-06-11T16:54:39 | 0.0 |
+| 2020-06-11T16:54:46 | 0.0 |
+| 2020-06-11T16:54:53 | 0.0 |
++---------------------+--------------------+
+-- InfluxQL: SELECT moving_average(mean(writes), 3) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(previous);
+name: diskio
++---------------------+-------------------+
+| time | moving_average |
++---------------------+-------------------+
+| 2020-06-11T16:53:08 | 5592700.666666667 |
+| 2020-06-11T16:53:15 | 5592817.666666667 |
+| 2020-06-11T16:53:22 | 5592934.666666667 |
+| 2020-06-11T16:53:29 | 5593034.333333333 |
+| 2020-06-11T16:53:36 | 5593108.333333333 |
+| 2020-06-11T16:53:43 | 5593182.333333333 |
+| 2020-06-11T16:53:50 | 5593292.0 |
+| 2020-06-11T16:53:57 | 5593390.0 |
+| 2020-06-11T16:54:04 | 5593513.333333333 |
+| 2020-06-11T16:54:11 | 5593563.666666667 |
+| 2020-06-11T16:54:18 | 5593637.666666667 |
+| 2020-06-11T16:54:25 | 5593686.333333333 |
+| 2020-06-11T16:54:32 | 5593735.0 |
+| 2020-06-11T16:54:39 | 5593735.0 |
+| 2020-06-11T16:54:46 | 5593735.0 |
+| 2020-06-11T16:54:53 | 5593735.0 |
++---------------------+-------------------+
+-- InfluxQL: SELECT moving_average(mean(writes), 3) FROM diskio WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' GROUP BY time(7s) fill(linear);
+name: diskio
++---------------------+-------------------+
+| time | moving_average |
++---------------------+-------------------+
+| 2020-06-11T16:53:08 | 5592728.0 |
+| 2020-06-11T16:53:15 | 5592845.0 |
+| 2020-06-11T16:53:22 | 5592953.333333333 |
+| 2020-06-11T16:53:29 | 5593053.0 |
+| 2020-06-11T16:53:36 | 5593127.0 |
+| 2020-06-11T16:53:43 | 5593218.833333333 |
+| 2020-06-11T16:53:50 | 5593328.5 |
+| 2020-06-11T16:53:57 | 5593426.5 |
+| 2020-06-11T16:54:04 | 5593513.333333333 |
+| 2020-06-11T16:54:11 | 5593588.0 |
+| 2020-06-11T16:54:18 | 5593662.0 |
++---------------------+-------------------+
+-- InfluxQL: SELECT difference(usage_idle), non_negative_difference(usage_idle), moving_average(usage_idle, 4) FROM cpu WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' AND cpu =~ /^cpu(0|1)$/ GROUP BY cpu;
+name: cpu
+tags: cpu=cpu0
++---------------------+----------------------+-------------------------+-------------------+
+| time | difference | non_negative_difference | moving_average |
++---------------------+----------------------+-------------------------+-------------------+
+| 2020-06-11T16:53:10 | 2.976802976802972 | 2.976802976802972 | |
+| 2020-06-11T16:53:20 | -0.878900878900879 | | |
+| 2020-06-11T16:53:30 | 1.3890109890109983 | 1.3890109890109983 | 89.05349145349146 |
+| 2020-06-11T16:53:40 | -0.10970970970971905 | | 89.89779229779231 |
+| 2020-06-11T16:53:50 | -0.4902902902902895 | | 89.87531982531982 |
+| 2020-06-11T16:54:00 | 0.29009009009008935 | 0.29009009009008935 | 90.14509509509509 |
+| 2020-06-11T16:54:10 | -1.2677348006689328 | | 89.75068391745037 |
++---------------------+----------------------+-------------------------+-------------------+
+name: cpu
+tags: cpu=cpu1
++---------------------+------------------------+-------------------------+-------------------+
+| time | difference | non_negative_difference | moving_average |
++---------------------+------------------------+-------------------------+-------------------+
+| 2020-06-11T16:53:10 | -0.09980019980019961 | | |
+| 2020-06-11T16:53:20 | 0.09980019980019961 | 0.09980019980019961 | |
+| 2020-06-11T16:53:30 | 0.0 | 0.0 | 99.87504995004994 |
+| 2020-06-11T16:53:40 | -0.0001001001001128543 | | 99.87502492502492 |
+| 2020-06-11T16:53:50 | -0.09970009970008675 | | 99.87502492502492 |
+| 2020-06-11T16:54:00 | 0.09959979899859661 | 0.09959979899859661 | 99.87497482482453 |
+| 2020-06-11T16:54:10 | -0.09959979899859661 | | 99.85002477487447 |
++---------------------+------------------------+-------------------------+-------------------+
+-- InfluxQL: SELECT difference(mean(usage_idle)), non_negative_difference(mean(usage_idle)), moving_average(mean(usage_idle), 4) FROM cpu WHERE time >= '2020-06-11T16:53:00Z' AND time < '2020-06-11T16:55:00Z' AND cpu =~ /^cpu(0|1)$/ GROUP BY TIME(30s), cpu;
+name: cpu
+tags: cpu=cpu0
++---------------------+---------------------+-------------------------+-------------------+
+| time | difference | non_negative_difference | moving_average |
++---------------------+---------------------+-------------------------+-------------------+
+| 2020-06-11T16:53:00 | 1.9489658560635377 | 1.9489658560635377 | |
+| 2020-06-11T16:53:30 | 1.5587748254415033 | 1.5587748254415033 | |
+| 2020-06-11T16:54:00 | -0.7072074070078145 | | 88.71999936827484 |
++---------------------+---------------------+-------------------------+-------------------+
+name: cpu
+tags: cpu=cpu1
++---------------------+------------------------+-------------------------+-------------------+
+| time | difference | non_negative_difference | moving_average |
++---------------------+------------------------+-------------------------+-------------------+
+| 2020-06-11T16:53:00 | 0.03313852030396447 | 0.03313852030396447 | |
+| 2020-06-11T16:53:30 | -0.0000333667000376181 | | |
+| 2020-06-11T16:54:00 | -0.0167002003341139 | | 99.85425690322373 |
++---------------------+------------------------+-------------------------+-------------------+
\ No newline at end of file
diff --git a/iox_query_influxql/src/plan/planner.rs b/iox_query_influxql/src/plan/planner.rs
index c8f58a5b55..0dc30ce98a 100644
--- a/iox_query_influxql/src/plan/planner.rs
+++ b/iox_query_influxql/src/plan/planner.rs
@@ -6,8 +6,8 @@ use crate::plan::planner::select::{
};
use crate::plan::planner_time_range_expression::time_range_to_df_expr;
use crate::plan::rewriter::{find_table_names, rewrite_statement, ProjectionType};
-use crate::plan::udaf::{AVG_N, DIFFERENCE};
-use crate::plan::udf::{difference, find_window_udfs, moving_average};
+use crate::plan::udaf::{AVG_N, DIFFERENCE, NON_NEGATIVE_DIFFERENCE};
+use crate::plan::udf::{difference, find_window_udfs, moving_average, non_negative_difference};
use crate::plan::util::{binary_operator_to_df_operator, rebase_expr, Schemas};
use crate::plan::var_ref::var_ref_data_type_to_data_type;
use crate::plan::{error, planner_rewrite_expression, udf, util_copy};
@@ -419,6 +419,7 @@ impl<'a> Context<'a> {
self.projection_type,
ProjectionType::Aggregate
| ProjectionType::WindowAggregate
+ | ProjectionType::WindowAggregateMixed
| ProjectionType::Selector { .. }
)
}
@@ -551,6 +552,19 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
};
let plan = self.project_select(&ctx, plan, &fields, &group_by_tag_set)?;
+
+ // TODO(sgc): Handle FILL(N) and FILL(previous)
+ //
+ // NOTE:
+ //
+        // The final plan should respect InfluxQL OG fill behaviour: when the query
+        // specifies `FILL(N)` or `FILL(previous)` and a field column contains a
+        // `NULL` value, the `FILL` value should be used instead.
+ //
+ // If the ProjectionType is `Aggregate`, we can skip this, as it will already be
+ // handled by the GapFill operator.
+ //
+ // See: https://github.com/influxdata/influxdb/blob/f365bb7e3a9c5e227dbf66d84adf674d3d127176/query/select.go#L635-L642
plans.push((table_name, plan));
}
@@ -761,6 +775,10 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
fields: &[Field],
group_by_tag_set: &[&str],
) -> Result<LogicalPlan> {
+ if ctx.projection_type == ProjectionType::WindowAggregateMixed {
+ return error::not_implemented("mixed window-aggregate and aggregate columns, such as DIFFERENCE(MEAN(col)), MEAN(col)");
+ }
+
let schemas = Schemas::new(input.schema())?;
// Transform InfluxQL AST field expressions to a list of DataFusion expressions.
@@ -798,21 +816,28 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
// Wrap the plan in a `LogicalPlan::Projection` from the select expressions
let plan = project(plan, select_exprs)?;
- // filter out rows when all fields are `NULL`
- //
- // TODO(sgc): this should respect InfluxQL OG fill behaviour.
- //
- // See: https://github.com/influxdata/influxdb/blob/f365bb7e3a9c5e227dbf66d84adf674d3d127176/query/select.go#L635-L642
- match conjunction(fields.iter().filter_map(|f| {
- if matches!(f.data_type, Some(InfluxColumnType::Field(_))) {
- Some(f.name.as_expr().is_null())
- } else {
- None
+ if matches!(
+ ctx.projection_type,
+ ProjectionType::WindowAggregate | ProjectionType::Window
+ ) {
+            // InfluxQL OG physical operators for window functions omit rows whose
+            // results are all NULL, so replicate that behaviour here.
+
+ // generate a predicate to filter rows where all field values of the row are `NULL`,
+ // like:
+ //
+ // NOT (field1 IS NULL AND field2 IS NULL AND ...)
+ match conjunction(fields.iter().filter_map(|f| {
+ if matches!(f.data_type, Some(InfluxColumnType::Field(_))) {
+ Some(f.name.as_expr().is_null())
+ } else {
+ None
+ }
+ })) {
+ Some(expr) => LogicalPlanBuilder::from(plan).filter(expr.not())?.build(),
+ None => Ok(plan),
}
- })) {
- // Some(expr) => LogicalPlanBuilder::from(plan).filter(expr.not())?.build(),
- Some(expr) => Ok(plan),
- None => Ok(plan),
+ } else {
+ Ok(plan)
}
}
@@ -1061,29 +1086,19 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
return error::internal(format!("udf_to_expr: unexpected expression: {e}"))
};
- match udf::WindowFunction::try_from_scalar_udf(fun.clone()) {
- Some(udf::WindowFunction::MovingAverage) => {
- let Some(Expr::Literal(ScalarValue::Int64(Some(arg1)))) = args.get(1).cloned() else {
- return error::internal("expected Int64 for second argument")
- };
-
- // Subtract 1 from the window to match InfluxQL behaviour.
- let arg1 = ScalarValue::UInt64(Some((arg1 - 1) as u64));
-
- Ok(Expr::WindowFunction(WindowFunction {
- fun: window_function::WindowFunction::AggregateUDF(AVG_N.clone()),
- args,
- partition_by,
- order_by,
- window_frame: WindowFrame {
- units: WindowFrameUnits::Rows,
- start_bound: WindowFrameBound::Preceding(ScalarValue::Null),
- // start_bound: WindowFrameBound::Preceding(arg1),
- end_bound: WindowFrameBound::CurrentRow,
- },
- })
- .alias(alias))
- }
+ match udf::WindowFunction::try_from_scalar_udf(Arc::clone(&fun)) {
+ Some(udf::WindowFunction::MovingAverage) => Ok(Expr::WindowFunction(WindowFunction {
+ fun: window_function::WindowFunction::AggregateUDF(AVG_N.clone()),
+ args,
+ partition_by,
+ order_by,
+ window_frame: WindowFrame {
+ units: WindowFrameUnits::Rows,
+ start_bound: WindowFrameBound::Preceding(ScalarValue::Null),
+ end_bound: WindowFrameBound::CurrentRow,
+ },
+ })
+ .alias(alias)),
Some(udf::WindowFunction::Difference) => Ok(Expr::WindowFunction(WindowFunction {
fun: window_function::WindowFunction::AggregateUDF(DIFFERENCE.clone()),
args,
@@ -1096,6 +1111,22 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
},
})
.alias(alias)),
+ Some(udf::WindowFunction::NonNegativeDifference) => {
+ Ok(Expr::WindowFunction(WindowFunction {
+ fun: window_function::WindowFunction::AggregateUDF(
+ NON_NEGATIVE_DIFFERENCE.clone(),
+ ),
+ args,
+ partition_by,
+ order_by,
+ window_frame: WindowFrame {
+ units: WindowFrameUnits::Rows,
+ start_bound: WindowFrameBound::Preceding(ScalarValue::Null),
+ end_bound: WindowFrameBound::CurrentRow,
+ },
+ })
+ .alias(alias))
+ }
None => error::internal(format!(
"unexpected user-defined window function: {}",
fun.name
@@ -1515,6 +1546,17 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
Ok(difference(vec![arg0]))
}
+ "non_negative_difference" => {
+ check_arg_count(name, args, 1)?;
+
+ // arg0 should be a column or function
+ let arg0 = self.expr_to_df_expr(scope, &args[0], schemas)?;
+ if let Expr::Literal(ScalarValue::Null) = arg0 {
+ return Ok(arg0);
+ }
+
+ Ok(non_negative_difference(vec![arg0]))
+ }
"moving_average" => {
check_arg_count(name, args, 2)?;
@@ -3292,13 +3334,90 @@ mod test {
"###);
}
- #[test]
- fn test_window_functions() {
- // let res = plan("SELECT MOVING_AVERAGE(usage_idle, 2) FROM cpu");
- // println!("{res}");
- let res = plan("SELECT DIFFERENCE(usage_idle) FROM cpu");
- println!("{res}");
- // assert_snapshot!(plan("SELECT MOVING_AVERAGE(usage_idle, 2) FROM cpu"), @"");
+ mod window_functions {
+ use super::*;
+
+ #[test]
+ fn test_difference() {
+ // no aggregates
+ assert_snapshot!(plan("SELECT DIFFERENCE(usage_idle) FROM cpu"), @r###"
+ Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), difference:Float64;N]
+ Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, time, difference [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), difference:Float64;N]
+ Filter: NOT difference IS NULL [time:Timestamp(Nanosecond, None), difference:Float64;N]
+ Projection: cpu.time AS time, difference(cpu.usage_idle) AS difference [time:Timestamp(Nanosecond, None), difference:Float64;N]
+ WindowAggr: windowExpr=[[AggregateUDF { name: "difference", signature: Signature { type_signature: OneOf([Exact([Int64]), Exact([Float64]), Exact([UInt64])]), volatility: Immutable }, fun: "<FUNC>" }(cpu.usage_idle) ORDER BY [cpu.time ASC NULLS LAST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW AS difference(cpu.usage_idle)]] [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N, difference(cpu.usage_idle):Float64;N]
+ TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
+ "###);
+
+ // aggregate
+ assert_snapshot!(plan("SELECT DIFFERENCE(MEAN(usage_idle)) FROM cpu GROUP BY TIME(10s)"), @r###"
+ Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, difference:Float64;N]
+ Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, time, difference [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, difference:Float64;N]
+ Filter: NOT difference IS NULL [time:Timestamp(Nanosecond, None);N, difference:Float64;N]
+ Projection: time, difference(AVG(cpu.usage_idle)) AS difference [time:Timestamp(Nanosecond, None);N, difference:Float64;N]
+ WindowAggr: windowExpr=[[AggregateUDF { name: "difference", signature: Signature { type_signature: OneOf([Exact([Int64]), Exact([Float64]), Exact([UInt64])]), volatility: Immutable }, fun: "<FUNC>" }(AVG(cpu.usage_idle)) ORDER BY [time ASC NULLS LAST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW AS difference(AVG(cpu.usage_idle))]] [time:Timestamp(Nanosecond, None);N, AVG(cpu.usage_idle):Float64;N, difference(AVG(cpu.usage_idle)):Float64;N]
+ GapFill: groupBy=[[time]], aggr=[[AVG(cpu.usage_idle)]], time_column=time, stride=IntervalMonthDayNano("10000000000"), range=Unbounded..Included(TimestampNanosecond(1672531200000000000, None)) [time:Timestamp(Nanosecond, None);N, AVG(cpu.usage_idle):Float64;N]
+ Aggregate: groupBy=[[date_bin(IntervalMonthDayNano("10000000000"), cpu.time, TimestampNanosecond(0, None)) AS time]], aggr=[[AVG(cpu.usage_idle)]] [time:Timestamp(Nanosecond, None);N, AVG(cpu.usage_idle):Float64;N]
+ Filter: cpu.time <= TimestampNanosecond(1672531200000000000, None) [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
+ TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
+ "###)
+ }
+
+ #[test]
+ fn test_non_negative_difference() {
+ // no aggregates
+ assert_snapshot!(plan("SELECT NON_NEGATIVE_DIFFERENCE(usage_idle) FROM cpu"), @r###"
+ Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), non_negative_difference:Float64;N]
+ Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, time, non_negative_difference [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), non_negative_difference:Float64;N]
+ Filter: NOT non_negative_difference IS NULL [time:Timestamp(Nanosecond, None), non_negative_difference:Float64;N]
+ Projection: cpu.time AS time, non_negative_difference(cpu.usage_idle) AS non_negative_difference [time:Timestamp(Nanosecond, None), non_negative_difference:Float64;N]
+ WindowAggr: windowExpr=[[AggregateUDF { name: "non_negative_difference", signature: Signature { type_signature: OneOf([Exact([Int64]), Exact([Float64]), Exact([UInt64])]), volatility: Immutable }, fun: "<FUNC>" }(cpu.usage_idle) ORDER BY [cpu.time ASC NULLS LAST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW AS non_negative_difference(cpu.usage_idle)]] [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N, non_negative_difference(cpu.usage_idle):Float64;N]
+ TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
+ "###);
+
+ // aggregate
+ assert_snapshot!(plan("SELECT NON_NEGATIVE_DIFFERENCE(MEAN(usage_idle)) FROM cpu GROUP BY TIME(10s)"), @r###"
+ Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, non_negative_difference:Float64;N]
+ Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, time, non_negative_difference [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, non_negative_difference:Float64;N]
+ Filter: NOT non_negative_difference IS NULL [time:Timestamp(Nanosecond, None);N, non_negative_difference:Float64;N]
+ Projection: time, non_negative_difference(AVG(cpu.usage_idle)) AS non_negative_difference [time:Timestamp(Nanosecond, None);N, non_negative_difference:Float64;N]
+ WindowAggr: windowExpr=[[AggregateUDF { name: "non_negative_difference", signature: Signature { type_signature: OneOf([Exact([Int64]), Exact([Float64]), Exact([UInt64])]), volatility: Immutable }, fun: "<FUNC>" }(AVG(cpu.usage_idle)) ORDER BY [time ASC NULLS LAST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW AS non_negative_difference(AVG(cpu.usage_idle))]] [time:Timestamp(Nanosecond, None);N, AVG(cpu.usage_idle):Float64;N, non_negative_difference(AVG(cpu.usage_idle)):Float64;N]
+ GapFill: groupBy=[[time]], aggr=[[AVG(cpu.usage_idle)]], time_column=time, stride=IntervalMonthDayNano("10000000000"), range=Unbounded..Included(TimestampNanosecond(1672531200000000000, None)) [time:Timestamp(Nanosecond, None);N, AVG(cpu.usage_idle):Float64;N]
+ Aggregate: groupBy=[[date_bin(IntervalMonthDayNano("10000000000"), cpu.time, TimestampNanosecond(0, None)) AS time]], aggr=[[AVG(cpu.usage_idle)]] [time:Timestamp(Nanosecond, None);N, AVG(cpu.usage_idle):Float64;N]
+ Filter: cpu.time <= TimestampNanosecond(1672531200000000000, None) [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
+ TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
+ "###)
+ }
+
+ #[test]
+ fn test_moving_average() {
+ // no aggregates
+ assert_snapshot!(plan("SELECT MOVING_AVERAGE(usage_idle, 3) FROM cpu"), @r###"
+ Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), moving_average:Float64;N]
+ Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, time, moving_average [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), moving_average:Float64;N]
+ Filter: NOT moving_average IS NULL [time:Timestamp(Nanosecond, None), moving_average:Float64;N]
+ Projection: cpu.time AS time, moving_average(cpu.usage_idle,Int64(3)) AS moving_average [time:Timestamp(Nanosecond, None), moving_average:Float64;N]
+ WindowAggr: windowExpr=[[AggregateUDF { name: "avg_n", signature: Signature { type_signature: OneOf([Exact([Int64, Int64]), Exact([Float64, Int64]), Exact([UInt64, Int64])]), volatility: Immutable }, fun: "<FUNC>" }(cpu.usage_idle, Int64(3)) ORDER BY [cpu.time ASC NULLS LAST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW AS moving_average(cpu.usage_idle,Int64(3))]] [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N, moving_average(cpu.usage_idle,Int64(3)):Float64;N]
+ TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
+ "###);
+
+ // aggregate
+ assert_snapshot!(plan("SELECT MOVING_AVERAGE(MEAN(usage_idle), 3) FROM cpu GROUP BY TIME(10s)"), @r###"
+ Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, moving_average:Float64;N]
+ Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, time, moving_average [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, moving_average:Float64;N]
+ Filter: NOT moving_average IS NULL [time:Timestamp(Nanosecond, None);N, moving_average:Float64;N]
+ Projection: time, moving_average(AVG(cpu.usage_idle),Int64(3)) AS moving_average [time:Timestamp(Nanosecond, None);N, moving_average:Float64;N]
+ WindowAggr: windowExpr=[[AggregateUDF { name: "avg_n", signature: Signature { type_signature: OneOf([Exact([Int64, Int64]), Exact([Float64, Int64]), Exact([UInt64, Int64])]), volatility: Immutable }, fun: "<FUNC>" }(AVG(cpu.usage_idle), Int64(3)) ORDER BY [time ASC NULLS LAST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW AS moving_average(AVG(cpu.usage_idle),Int64(3))]] [time:Timestamp(Nanosecond, None);N, AVG(cpu.usage_idle):Float64;N, moving_average(AVG(cpu.usage_idle),Int64(3)):Float64;N]
+ GapFill: groupBy=[[time]], aggr=[[AVG(cpu.usage_idle)]], time_column=time, stride=IntervalMonthDayNano("10000000000"), range=Unbounded..Included(TimestampNanosecond(1672531200000000000, None)) [time:Timestamp(Nanosecond, None);N, AVG(cpu.usage_idle):Float64;N]
+ Aggregate: groupBy=[[date_bin(IntervalMonthDayNano("10000000000"), cpu.time, TimestampNanosecond(0, None)) AS time]], aggr=[[AVG(cpu.usage_idle)]] [time:Timestamp(Nanosecond, None);N, AVG(cpu.usage_idle):Float64;N]
+ Filter: cpu.time <= TimestampNanosecond(1672531200000000000, None) [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
+ TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
+ "###)
+ }
+
+        #[test]
+        fn test_not_implemented() {
+ assert_snapshot!(plan("SELECT DIFFERENCE(MEAN(usage_idle)), MEAN(usage_idle) FROM cpu GROUP BY TIME(10s)"), @"This feature is not implemented: mixed window-aggregate and aggregate columns, such as DIFFERENCE(MEAN(col)), MEAN(col)");
+ }
}
/// Tests for the `DISTINCT` clause and `DISTINCT` function
diff --git a/iox_query_influxql/src/plan/rewriter.rs b/iox_query_influxql/src/plan/rewriter.rs
index dec7c3384c..ce6b453f9e 100644
--- a/iox_query_influxql/src/plan/rewriter.rs
+++ b/iox_query_influxql/src/plan/rewriter.rs
@@ -952,6 +952,11 @@ struct FieldChecker {
/// Accumulator for the number of selector expressions for the statement.
selector_count: usize,
+ // Set to `true` if any window or aggregate functions are expected to
+ // only produce non-null results.
+ //
+    // This replicates the behaviour of InfluxQL OG, which filters rows whose results are all NULL.
+ // filter_null_rows: bool,
}
impl FieldChecker {
@@ -1013,14 +1018,21 @@ impl FieldChecker {
}
}
- // By this point the statement is valid, so lets
- // determine the projection type
+ // At this point the statement is valid, and numerous preconditions
+ // have been met. The final state of the `FieldChecker` is inspected
+    // to determine the type of projection. The ProjectionType dictates
+    // how the query will be planned, as well as other behaviour, such as how
+    // NULL values are handled, to ensure compatibility with InfluxQL OG.
let projection_type = if self.has_top_bottom {
ProjectionType::TopBottomSelector
} else if self.has_group_by_time {
if self.window_count > 0 {
- ProjectionType::WindowAggregate
+ if self.window_count == self.aggregate_count {
+ ProjectionType::WindowAggregate
+ } else {
+ ProjectionType::WindowAggregateMixed
+ }
} else {
ProjectionType::Aggregate
}
@@ -1557,6 +1569,10 @@ pub(crate) enum ProjectionType {
Window,
/// A query that projects a combination of window and nested aggregate functions.
WindowAggregate,
+ /// A query that projects a combination of window and nested aggregate functions, including
+ /// separate projections that are just aggregates. This requires special handling of
+ /// windows that produce `NULL` results.
+ WindowAggregateMixed,
/// A query that projects a single selector function,
/// such as `last` or `first`.
Selector {
@@ -1738,6 +1754,12 @@ mod test {
.unwrap();
assert_matches!(info.projection_type, ProjectionType::WindowAggregate);
+ let info = select_statement_info(&parse_select(
+ "SELECT difference(count(foo)), mean(foo) FROM cpu GROUP BY TIME(10s)",
+ ))
+ .unwrap();
+ assert_matches!(info.projection_type, ProjectionType::WindowAggregateMixed);
+
let info = select_statement_info(&parse_select("SELECT top(foo, 3) FROM cpu")).unwrap();
assert_matches!(info.projection_type, ProjectionType::TopBottomSelector);
}
diff --git a/iox_query_influxql/src/plan/udaf.rs b/iox_query_influxql/src/plan/udaf.rs
index 9773696e08..8b92142556 100644
--- a/iox_query_influxql/src/plan/udaf.rs
+++ b/iox_query_influxql/src/plan/udaf.rs
@@ -1,14 +1,12 @@
use crate::plan::error;
-use arrow::array::{Array, ArrayRef, Int64Array, UInt64Array};
+use arrow::array::{Array, ArrayRef, Int64Array};
use arrow::datatypes::DataType;
use datafusion::common::{downcast_value, DataFusionError, Result, ScalarValue};
use datafusion::logical_expr::{
Accumulator, AccumulatorFunctionImplementation, AggregateUDF, ReturnTypeFunction, Signature,
StateTypeFunction, TypeSignature, Volatility,
};
-use datafusion::physical_expr::expressions::AvgAccumulator;
use once_cell::sync::Lazy;
-use std::iter;
use std::sync::Arc;
pub(crate) const AVG_N_NAME: &str = "avg_n";
@@ -16,7 +14,7 @@ pub(crate) const AVG_N_NAME: &str = "avg_n";
pub(crate) static AVG_N: Lazy<Arc<AggregateUDF>> = Lazy::new(|| {
let rt_func: ReturnTypeFunction = Arc::new(move |_| Ok(Arc::new(DataType::Float64)));
let accumulator: AccumulatorFunctionImplementation =
- Arc::new(|_| Ok(Box::new(AvgNAcc::try_new(&DataType::Float64))));
+ Arc::new(|_| Ok(Box::new(AvgNAccumulator::try_new(&DataType::Float64))));
// State is count, sum, N
let st_func: StateTypeFunction = Arc::new(move |_| {
Ok(Arc::new(vec![
@@ -44,51 +42,60 @@ pub(crate) static AVG_N: Lazy<Arc<AggregateUDF>> = Lazy::new(|| {
Arc::new(udaf)
});
+/// A moving average accumulator that accumulates exactly `N` values
+/// before producing a non-null result.
#[derive(Debug)]
-struct AvgNAcc {
+struct AvgNAccumulator {
+ /// The data type of the values being accumulated in [`Self::all_values`].
data_type: DataType,
+ all_values: Vec<ScalarValue>,
+ /// Holds the number of non-null values to be accumulated and represents
+ /// the second argument to the `AVG_N` aggregate function.
n: usize,
+ /// The index into [`Self::all_values`] to store the next non-null value.
i: usize,
+ /// `true` if the last value observed was `NULL`
last_is_null: bool,
- all_values: Vec<ScalarValue>,
}
-impl AvgNAcc {
+impl AvgNAccumulator {
/// Creates a try_new `AvgNAcc`
pub fn try_new(datatype: &DataType) -> Self {
Self {
data_type: datatype.clone(),
+ all_values: vec![],
n: 0,
i: 0,
last_is_null: true,
- all_values: vec![],
}
}
}
-impl Accumulator for AvgNAcc {
+impl Accumulator for AvgNAccumulator {
fn state(&self) -> Result<Vec<ScalarValue>> {
let state = ScalarValue::new_list(Some(self.all_values.clone()), self.data_type.clone());
Ok(vec![state])
}
fn update_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
- assert_eq!(values.len(), 2);
- let array = &values[0];
+ assert_eq!(values.len(), 2, "AVG_N expects two arguments");
- // the second element of the values array is the second argument to the `AVG_N` function,
+ // The second element of the values array is the second argument to the `AVG_N` function,
// which specifies the minimum number of values that must be aggregated.
let n_values = downcast_value!(&values[1], Int64Array);
let n = n_values.value(0) as usize;
- // first observation of the second argument N
+ // first observation of the second argument, N
if self.n == 0 {
assert!(self.all_values.is_empty());
self.n = n;
self.all_values = vec![ScalarValue::try_from(&self.data_type)?; n];
} else if self.n != n {
- return Err(DataFusionError::External("N is not constant".into()));
+ return Err(DataFusionError::External(
+ "AVG_N: N must be constant".into(),
+ ));
}
+ let array = &values[0];
assert!(array.len() < 2, "this accumulator should be used with an");
for index in 0..array.len() {
let value = ScalarValue::try_from_array(array, index)?;
@@ -102,12 +109,11 @@ impl Accumulator for AvgNAcc {
Ok(())
}
- fn merge_batch(&mut self, states: &[ArrayRef]) -> Result<()> {
- todo!();
+ fn merge_batch(&mut self, _states: &[ArrayRef]) -> Result<()> {
+ todo!("Discuss usage with Andrew Lamb")
}
fn evaluate(&self) -> Result<ScalarValue> {
- // if any values are NULL return a null result
if self.last_is_null || self.all_values.iter().any(|v| v.is_null()) {
return ScalarValue::try_from(&self.data_type);
}
@@ -117,6 +123,7 @@ impl Accumulator for AvgNAcc {
.iter()
.cloned()
.reduce(|acc, v| acc.add(v).unwrap())
+ // safe to unwrap, as all_values is known to contain only the same primitive values
.unwrap();
let n = self.n as f64;
@@ -124,8 +131,7 @@ impl Accumulator for AvgNAcc {
ScalarValue::Float64(Some(v)) => ScalarValue::from(v / n),
ScalarValue::Int64(Some(v)) => ScalarValue::from(v as f64 / n),
ScalarValue::UInt64(Some(v)) => ScalarValue::from(v as f64 / n),
- // TODO(sgc): This should return an error
- _ => ScalarValue::try_from(&self.data_type).unwrap(),
+ _ => return error::internal("unexpected scalar value type"),
})
}
@@ -137,89 +143,98 @@ impl Accumulator for AvgNAcc {
}
}
-/// An accumulator to compute the average
+pub(crate) const DIFFERENCE_NAME: &str = "difference";
+
+pub(crate) static DIFFERENCE: Lazy<Arc<AggregateUDF>> = Lazy::new(|| {
+ let rt_func: ReturnTypeFunction = Arc::new(move |dt| Ok(Arc::new(dt[0].clone())));
+ let accumulator: AccumulatorFunctionImplementation =
+ Arc::new(|dt| Ok(Box::new(DifferenceAccumulator::try_new(dt)?)));
+ let st_func: StateTypeFunction = Arc::new(move |dt| Ok(Arc::new(vec![dt.clone(), dt.clone()])));
+
+ let udaf = AggregateUDF::new(
+ DIFFERENCE_NAME,
+ &Signature::one_of(
+ vec![
+ TypeSignature::Exact(vec![DataType::Int64]),
+ TypeSignature::Exact(vec![DataType::Float64]),
+ TypeSignature::Exact(vec![DataType::UInt64]),
+ ],
+ Volatility::Immutable,
+ ),
+ &rt_func,
+ &accumulator,
+ &st_func,
+ );
+
+ Arc::new(udaf)
+});
+
#[derive(Debug)]
-struct AvgNAccumulator {
- acc: AvgAccumulator,
- n: u64,
+struct DifferenceAccumulator {
+ data_type: DataType,
+ last: ScalarValue,
+ diff: ScalarValue,
}
-impl AvgNAccumulator {
- /// Creates a try_new `AvgNAccumulator`
- pub fn try_new(datatype: &DataType, return_data_type: &DataType) -> Self {
- Self {
- acc: AvgAccumulator::try_new(datatype, return_data_type).unwrap(),
- n: 0,
- }
+impl DifferenceAccumulator {
+ fn try_new(data_type: &DataType) -> Result<Self> {
+ let last: ScalarValue = data_type.try_into()?;
+ let diff = last.clone();
+ Ok(Self {
+ data_type: data_type.clone(),
+ last,
+ diff,
+ })
}
}
-impl Accumulator for AvgNAccumulator {
+impl Accumulator for DifferenceAccumulator {
fn state(&self) -> Result<Vec<ScalarValue>> {
- Ok(self
- .acc
- .state()?
- .into_iter()
- .chain(iter::once(ScalarValue::UInt64(Some(self.n))))
- .collect())
+ Ok(vec![self.last.clone(), self.diff.clone()])
}
fn update_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
- let col_values = &values[0];
- let col_len = col_values.len();
- let col_nulls = col_values.null_count();
-
- // the second element of the values array is the second argument to the `AVG_N` function,
- // which specifies the minimum number of values that must be aggregated.
- let n_values = downcast_value!(&values[1], Int64Array);
- let n = n_values.value(0);
- if self.n > 0 && self.n != n as u64 {
- Err(DataFusionError::External("N is not constant".into()))
- } else {
- self.n = n as u64;
- self.acc.update_batch(&[Arc::clone(col_values)])
+ if values.is_empty() {
+ return Ok(());
}
+ let arr = &values[0];
+ for index in 0..arr.len() {
+ let scalar = ScalarValue::try_from_array(arr, index)?;
+ if !scalar.is_null() {
+ if !self.last.is_null() {
+ self.diff = scalar.sub(self.last.clone())?
+ }
+ self.last = scalar;
+ } else {
+ self.diff = ScalarValue::try_from(&self.data_type).unwrap()
+ }
+ }
+ Ok(())
}
- fn retract_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
- let values = &values[0];
- self.acc.retract_batch(&[Arc::clone(values)])
- }
-
- fn merge_batch(&mut self, states: &[ArrayRef]) -> Result<()> {
- let n = downcast_value!(states[2], UInt64Array);
- // fetch N from one
- self.n = *n.values().get(0).unwrap_or(&0);
- self.acc.merge_batch(states)
+ fn merge_batch(&mut self, _states: &[ArrayRef]) -> Result<()> {
+ todo!("Discuss usage with Andrew Lamb")
}
fn evaluate(&self) -> Result<ScalarValue> {
- let state = self.state()?;
- // check it has accumulated at least N values, otherwise return NULL
- match state.get(0) {
- Some(ScalarValue::UInt64(Some(count))) if *count < self.n => {
- Ok(ScalarValue::Float64(None))
- }
- Some(ScalarValue::UInt64(None)) | None => Ok(ScalarValue::Float64(None)),
- Some(_) => self.acc.evaluate(),
- }
+ Ok(self.diff.clone())
}
fn size(&self) -> usize {
- self.acc.size()
+ std::mem::size_of_val(self)
}
}
-pub(crate) const DIFFERENCE_NAME: &str = "difference";
+pub(crate) const NON_NEGATIVE_DIFFERENCE_NAME: &str = "non_negative_difference";
-pub(crate) static DIFFERENCE: Lazy<Arc<AggregateUDF>> = Lazy::new(|| {
+pub(crate) static NON_NEGATIVE_DIFFERENCE: Lazy<Arc<AggregateUDF>> = Lazy::new(|| {
let rt_func: ReturnTypeFunction = Arc::new(move |dt| Ok(Arc::new(dt[0].clone())));
let accumulator: AccumulatorFunctionImplementation =
- Arc::new(|dt| Ok(Box::new(DifferenceAccumulator::try_new(dt)?)));
+ Arc::new(|dt| Ok(Box::new(NonNegativeDifferenceAccumulator::try_new(dt)?)));
let st_func: StateTypeFunction = Arc::new(move |dt| Ok(Arc::new(vec![dt.clone(), dt.clone()])));
let udaf = AggregateUDF::new(
- DIFFERENCE_NAME,
+ NON_NEGATIVE_DIFFERENCE_NAME,
&Signature::one_of(
vec![
TypeSignature::Exact(vec![DataType::Int64]),
@@ -237,13 +252,13 @@ pub(crate) static DIFFERENCE: Lazy<Arc<AggregateUDF>> = Lazy::new(|| {
});
#[derive(Debug)]
-struct DifferenceAccumulator {
+struct NonNegativeDifferenceAccumulator {
data_type: DataType,
last: ScalarValue,
diff: ScalarValue,
}
-impl DifferenceAccumulator {
+impl NonNegativeDifferenceAccumulator {
fn try_new(data_type: &DataType) -> Result<Self> {
let last: ScalarValue = data_type.try_into()?;
let diff = last.clone();
@@ -255,7 +270,7 @@ impl DifferenceAccumulator {
}
}
-impl Accumulator for DifferenceAccumulator {
+impl Accumulator for NonNegativeDifferenceAccumulator {
fn state(&self) -> Result<Vec<ScalarValue>> {
Ok(vec![self.last.clone(), self.diff.clone()])
}
@@ -280,12 +295,15 @@ impl Accumulator for DifferenceAccumulator {
}
fn merge_batch(&mut self, _states: &[ArrayRef]) -> Result<()> {
- // This API is not called for difference
- error::not_implemented("DifferenceAccumulator::merge_batch")
+ todo!("Discuss usage with Andrew Lamb")
}
fn evaluate(&self) -> Result<ScalarValue> {
- Ok(self.diff.clone())
+ Ok(match &self.diff {
+ ScalarValue::Float64(Some(v)) if *v < 0.0 => ScalarValue::Float64(None),
+ ScalarValue::Int64(Some(v)) if *v < 0 => ScalarValue::Int64(None),
+ v => v.clone(),
+ })
}
fn size(&self) -> usize {
diff --git a/iox_query_influxql/src/plan/udf.rs b/iox_query_influxql/src/plan/udf.rs
index ca8d6cf348..c2183ce03e 100644
--- a/iox_query_influxql/src/plan/udf.rs
+++ b/iox_query_influxql/src/plan/udf.rs
@@ -18,6 +18,7 @@ use std::sync::Arc;
pub(super) enum WindowFunction {
MovingAverage,
Difference,
+ NonNegativeDifference,
}
impl WindowFunction {
@@ -26,6 +27,7 @@ impl WindowFunction {
match fun.name.as_str() {
MOVING_AVERAGE_UDF_NAME => Some(Self::MovingAverage),
DIFFERENCE_UDF_NAME => Some(Self::Difference),
+ NON_NEGATIVE_DIFFERENCE_UDF_NAME => Some(Self::NonNegativeDifference),
_ => None,
}
}
@@ -36,7 +38,7 @@ impl WindowFunction {
pub(super) fn find_window_udfs(exprs: &[Expr]) -> Vec<Expr> {
find_exprs_in_exprs(
exprs,
- &|nested_expr| matches!(nested_expr, Expr::ScalarUDF(s) if WindowFunction::try_from_scalar_udf(s.fun.clone()).is_some()),
+ &|nested_expr| matches!(nested_expr, Expr::ScalarUDF(s) if WindowFunction::try_from_scalar_udf(Arc::clone(&s.fun)).is_some()),
)
}
@@ -92,6 +94,31 @@ static DIFFERENCE: Lazy<Arc<ScalarUDF>> = Lazy::new(|| {
))
});
+const NON_NEGATIVE_DIFFERENCE_UDF_NAME: &str = "non_negative_difference";
+
+/// Create an expression to represent the `NON_NEGATIVE_DIFFERENCE` function.
+pub(crate) fn non_negative_difference(args: Vec<Expr>) -> Expr {
+ NON_NEGATIVE_DIFFERENCE.call(args)
+}
+
+/// Definition of the `NON_NEGATIVE_DIFFERENCE` function.
+static NON_NEGATIVE_DIFFERENCE: Lazy<Arc<ScalarUDF>> = Lazy::new(|| {
+ let return_type_fn: ReturnTypeFunction = Arc::new(|args| Ok(Arc::new(args[0].clone())));
+ Arc::new(ScalarUDF::new(
+ NON_NEGATIVE_DIFFERENCE_UDF_NAME,
+ &Signature::one_of(
+ vec![
+ TypeSignature::Exact(vec![DataType::Float64]),
+ TypeSignature::Exact(vec![DataType::Int64]),
+ TypeSignature::Exact(vec![DataType::UInt64]),
+ ],
+ Volatility::Volatile,
+ ),
+ &return_type_fn,
+ &stand_in_impl(NON_NEGATIVE_DIFFERENCE_UDF_NAME),
+ ))
+});
+
/// Returns an implementation that always returns an error.
fn stand_in_impl(name: &'static str) -> ScalarFunctionImplementation {
Arc::new(move |_| error::internal(format!("{name} should not exist in the final logical plan")))
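
Illustrative note, not part of the commit: the `evaluate()` change above maps negative differences to NULL. A minimal standalone sketch of that clamping rule, using DataFusion's `ScalarValue`; the helper name is hypothetical and only mirrors the match arms shown in the diff.

use datafusion::scalar::ScalarValue;

// Hypothetical helper mirroring NonNegativeDifferenceAccumulator::evaluate():
// negative Float64/Int64 differences become NULL, anything else passes through.
fn clamp_non_negative(diff: ScalarValue) -> ScalarValue {
    match diff {
        ScalarValue::Float64(Some(v)) if v < 0.0 => ScalarValue::Float64(None),
        ScalarValue::Int64(Some(v)) if v < 0 => ScalarValue::Int64(None),
        other => other,
    }
}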
|
01a22f8a2c5aa218a4a38cc22c048e8305f7ab0c
|
Stuart Carnie
|
2023-06-06 12:54:05
|
Simplify WHERE clause handling
|
We don't need `Context` in the expression rewriting, which eliminates
a redundant `Context::default()` call in all the `SHOW` (metadata)
queries. Also simplified the time-range handling of metadata queries,
as we now have an API to extract the time range from a `WHERE` clause
predicate and add the time filter to the same `Filter` logical plan
node.
| null |
chore: Simplify WHERE clause handling
We don't need `Context` in the expression rewriting, which eliminates
a redundant `Context::default()` call in all the `SHOW` (metadata)
queries. Also simplified the time-range handling of metadata queries,
as we now have an API to extract the time range from a `WHERE` clause
predicate and add the time filter to the same `Filter` logical plan
node.
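
Illustrative sketch, not part of the commit: the diff below splits the `WHERE` clause once via the parser's `split_cond`/`ReduceContext` API and carries the two pieces separately. A rough sketch of that split, with the import paths, field names, and call shape taken from the diff and error handling elided; the wrapper function name is hypothetical.

use influxdb_influxql_parser::common::WhereClause;
use influxdb_influxql_parser::expression::ConditionalExpression;
use influxdb_influxql_parser::time_range::{split_cond, ReduceContext, TimeRange};
use influxdb_influxql_parser::timestamp::Timestamp;

// Sketch: split a WHERE clause once, keeping the non-time predicate separate
// from the extracted time bounds, so both can feed a single Filter plan node.
fn split_where(
    where_clause: &WhereClause,
    now: Timestamp,
) -> (Option<ConditionalExpression>, Option<TimeRange>) {
    let rc = ReduceContext { now: Some(now), tz: None };
    // Non-time conditions come back on the left, time bounds on the right.
    split_cond(&rc, where_clause).expect("time expressions reduce to literals")
}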
|
diff --git a/iox_query_influxql/src/plan/ir.rs b/iox_query_influxql/src/plan/ir.rs
index 92d3720998..f0937a03b5 100644
--- a/iox_query_influxql/src/plan/ir.rs
+++ b/iox_query_influxql/src/plan/ir.rs
@@ -7,11 +7,12 @@ use influxdb_influxql_parser::common::{
LimitClause, MeasurementName, OffsetClause, OrderByClause, QualifiedMeasurementName,
WhereClause,
};
-use influxdb_influxql_parser::expression::Expr;
+use influxdb_influxql_parser::expression::{ConditionalExpression, Expr};
use influxdb_influxql_parser::select::{
FieldList, FillClause, FromMeasurementClause, GroupByClause, MeasurementSelection,
SelectStatement, TimeZoneClause,
};
+use influxdb_influxql_parser::time_range::TimeRange;
use schema::{InfluxColumnType, Schema};
use std::collections::HashSet;
use std::fmt::{Display, Formatter};
@@ -38,8 +39,12 @@ pub(super) struct Select {
/// A list of data sources for the selection.
pub(super) from: Vec<DataSource>,
- /// A conditional expression to filter the selection.
- pub(super) condition: Option<WhereClause>,
+ /// A conditional expression to filter the selection, excluding any predicates for the `time`
+ /// column.
+ pub(super) condition: Option<ConditionalExpression>,
+
+ /// The time range derived from the `WHERE` clause of the `SELECT` statement.
+ pub(super) time_range: Option<TimeRange>,
/// The GROUP BY clause of the selection.
pub(super) group_by: Option<GroupByClause>,
@@ -100,7 +105,7 @@ impl From<Select> for SelectStatement {
})
.collect(),
),
- condition: value.condition,
+ condition: where_clause(value.condition, value.time_range),
group_by: value.group_by,
fill: value.fill,
order_by: value.order_by,
@@ -113,6 +118,34 @@ impl From<Select> for SelectStatement {
}
}
+/// Combine the `condition` and `time_range` into a single `WHERE` predicate.
+fn where_clause(
+ condition: Option<ConditionalExpression>,
+ time_range: Option<TimeRange>,
+) -> Option<WhereClause> {
+ let time_expr: Option<ConditionalExpression> = if let Some(t) = time_range {
+ Some(
+ match (t.lower, t.upper) {
+ (Some(lower), Some(upper)) if lower == upper => format!("time = {lower}"),
+ (Some(lower), Some(upper)) => format!("time >= {lower} AND time <= {upper}"),
+ (Some(lower), None) => format!("time >= {lower}"),
+ (None, Some(upper)) => format!("time <= {upper}"),
+ (None, None) => unreachable!(),
+ }
+ .parse()
+ .unwrap(),
+ )
+ } else {
+ None
+ };
+
+ match (time_expr, condition) {
+ (Some(lhs), Some(rhs)) => Some(WhereClause::new(lhs.and(rhs))),
+ (Some(expr), None) | (None, Some(expr)) => Some(WhereClause::new(expr)),
+ (None, None) => None,
+ }
+}
+
/// Represents a data source that is either a table or a subquery in a [`Select`] from clause.
#[derive(Debug, Clone)]
pub(super) enum DataSource {
diff --git a/iox_query_influxql/src/plan/planner.rs b/iox_query_influxql/src/plan/planner.rs
index fb305c12ca..bcb9559a06 100644
--- a/iox_query_influxql/src/plan/planner.rs
+++ b/iox_query_influxql/src/plan/planner.rs
@@ -51,7 +51,7 @@ use influxdb_influxql_parser::show_tag_keys::ShowTagKeysStatement;
use influxdb_influxql_parser::show_tag_values::{ShowTagValuesStatement, WithKeyClause};
use influxdb_influxql_parser::simple_from_clause::ShowFromClause;
use influxdb_influxql_parser::time_range::{
- duration_expr_to_nanoseconds, split_cond, ReduceContext,
+ duration_expr_to_nanoseconds, split_cond, ReduceContext, TimeRange,
};
use influxdb_influxql_parser::timestamp::Timestamp;
use influxdb_influxql_parser::{
@@ -138,6 +138,10 @@ struct Context<'a> {
projection_type: ProjectionType,
tz: Option<Tz>,
+ // WHERE
+ condition: Option<&'a ConditionalExpression>,
+ time_range: Option<TimeRange>,
+
// GROUP BY information
group_by: Option<&'a GroupByClause>,
fill: Option<FillClause>,
@@ -148,36 +152,33 @@ struct Context<'a> {
}
impl<'a> Context<'a> {
- fn new(table_name: &'a str) -> Self {
+ fn new_root(
+ table_name: &'a str,
+ select: &'a Select,
+ root_group_by_tags: &'a [&'a str],
+ ) -> Self {
Self {
table_name,
- ..Default::default()
- }
- }
-
- fn with_projection_type(&self, projection_type: ProjectionType) -> Self {
- Self {
- projection_type,
- ..*self
- }
- }
-
- fn with_timezone(&self, tz: Option<Tz>) -> Self {
- Self { tz, ..*self }
- }
-
- fn with_group_by_fill(&self, select: &'a Select) -> Self {
- Self {
+ projection_type: select.projection_type,
+ tz: select.timezone,
+ condition: select.condition.as_ref(),
+ time_range: select.time_range,
group_by: select.group_by.as_ref(),
fill: select.fill,
- ..*self
+ root_group_by_tags,
}
}
- fn with_root_group_by_tags(&self, root_group_by_tags: &'a [&'a str]) -> Self {
+ fn subquery(&self, select: &'a Select) -> Self {
Self {
- root_group_by_tags,
- ..*self
+ table_name: self.table_name,
+ projection_type: select.projection_type,
+ tz: select.timezone,
+ condition: select.condition.as_ref(),
+ time_range: select.time_range,
+ group_by: select.group_by.as_ref(),
+ fill: select.fill,
+ root_group_by_tags: self.root_group_by_tags,
}
}
@@ -326,11 +327,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
let sort_by_measurement = table_names.len() > 1;
let mut plans = Vec::new();
for table_name in table_names {
- let ctx = Context::new(table_name)
- .with_projection_type(select.projection_type)
- .with_timezone(select.timezone)
- .with_group_by_fill(select)
- .with_root_group_by_tags(&group_by_tags);
+ let ctx = Context::new_root(table_name, select, &group_by_tags);
let Some(plan) = self.union_from(&ctx, select)? else {
continue;
@@ -442,11 +439,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
}
fn subquery_to_plan(&self, ctx: &Context<'_>, select: &Select) -> Result<Option<LogicalPlan>> {
- let ctx = Context::new(ctx.table_name)
- .with_projection_type(select.projection_type)
- .with_timezone(select.timezone)
- .with_group_by_fill(select)
- .with_root_group_by_tags(ctx.root_group_by_tags);
+ let ctx = ctx.subquery(select);
let Some(plan) = self.union_from(&ctx, select)? else {
return Ok(None)
@@ -504,8 +497,13 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
let schemas = Schemas::new(plan.schema())?;
let ds_schema = ds.schema(self.s)?;
- let plan =
- self.plan_where_clause(ctx, &select.condition, plan, &schemas, &ds_schema)?;
+ let plan = self.plan_condition_time_range(
+ ctx.condition,
+ ctx.time_range,
+ plan,
+ &schemas,
+ &ds_schema,
+ )?;
plans.push(plan);
}
@@ -554,7 +552,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
let schemas = Schemas::new(input.schema())?;
// Transform InfluxQL AST field expressions to a list of DataFusion expressions.
- let mut select_exprs = self.field_list_to_exprs(ctx, &input, fields, &schemas)?;
+ let mut select_exprs = self.field_list_to_exprs(&input, fields, &schemas)?;
if ctx.is_raw_distinct() {
// This is a special case, where exactly one column can be projected with a `DISTINCT`
@@ -910,14 +908,13 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
/// Map the InfluxQL `SELECT` projection list into a list of DataFusion expressions.
fn field_list_to_exprs(
&self,
- ctx: &Context<'_>,
plan: &LogicalPlan,
fields: &[Field],
schemas: &Schemas,
) -> Result<Vec<Expr>> {
fields
.iter()
- .map(|field| self.field_to_df_expr(ctx, field, plan, schemas))
+ .map(|field| self.field_to_df_expr(field, plan, schemas))
.collect()
}
@@ -926,12 +923,11 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
/// A [`Field`] is analogous to a column in a SQL `SELECT` projection.
fn field_to_df_expr(
&self,
- ctx: &Context<'_>,
field: &Field,
plan: &LogicalPlan,
schemas: &Schemas,
) -> Result<Expr> {
- let expr = self.expr_to_df_expr(ctx, ExprScope::Projection, &field.expr, schemas)?;
+ let expr = self.expr_to_df_expr(ExprScope::Projection, &field.expr, schemas)?;
let expr = planner_rewrite_expression::rewrite_field_expr(expr, schemas)?;
normalize_col(expr.alias(&field.name), plan)
}
@@ -939,45 +935,37 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
/// Map an InfluxQL [`ConditionalExpression`] to a DataFusion [`Expr`].
fn conditional_to_df_expr(
&self,
- ctx: &Context<'_>,
iql: &ConditionalExpression,
schemas: &Schemas,
) -> Result<Expr> {
match iql {
ConditionalExpression::Expr(expr) => {
- self.expr_to_df_expr(ctx, ExprScope::Where, expr, schemas)
+ self.expr_to_df_expr(ExprScope::Where, expr, schemas)
}
ConditionalExpression::Binary(expr) => {
- self.binary_conditional_to_df_expr(ctx, expr, schemas)
+ self.binary_conditional_to_df_expr(expr, schemas)
}
- ConditionalExpression::Grouped(e) => self.conditional_to_df_expr(ctx, e, schemas),
+ ConditionalExpression::Grouped(e) => self.conditional_to_df_expr(e, schemas),
}
}
/// Map an InfluxQL binary conditional expression to a DataFusion [`Expr`].
fn binary_conditional_to_df_expr(
&self,
- ctx: &Context<'_>,
expr: &ConditionalBinary,
schemas: &Schemas,
) -> Result<Expr> {
let ConditionalBinary { lhs, op, rhs } = expr;
Ok(binary_expr(
- self.conditional_to_df_expr(ctx, lhs, schemas)?,
+ self.conditional_to_df_expr(lhs, schemas)?,
conditional_op_to_operator(*op)?,
- self.conditional_to_df_expr(ctx, rhs, schemas)?,
+ self.conditional_to_df_expr(rhs, schemas)?,
))
}
/// Map an InfluxQL [`IQLExpr`] to a DataFusion [`Expr`].
- fn expr_to_df_expr(
- &self,
- ctx: &Context<'_>,
- scope: ExprScope,
- iql: &IQLExpr,
- schemas: &Schemas,
- ) -> Result<Expr> {
+ fn expr_to_df_expr(&self, scope: ExprScope, iql: &IQLExpr, schemas: &Schemas) -> Result<Expr> {
let schema = &schemas.df_schema;
match iql {
// rewriter is expected to expand wildcard expressions
@@ -1058,9 +1046,9 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
},
// A DISTINCT <ident> clause should have been replaced by `rewrite_statement`.
IQLExpr::Distinct(_) => error::internal("distinct expression"),
- IQLExpr::Call(call) => self.call_to_df_expr(ctx, scope, call, schemas),
- IQLExpr::Binary(expr) => self.arithmetic_expr_to_df_expr(ctx, scope, expr, schemas),
- IQLExpr::Nested(e) => self.expr_to_df_expr(ctx, scope, e, schemas),
+ IQLExpr::Call(call) => self.call_to_df_expr(scope, call, schemas),
+ IQLExpr::Binary(expr) => self.arithmetic_expr_to_df_expr(scope, expr, schemas),
+ IQLExpr::Nested(e) => self.expr_to_df_expr(scope, e, schemas),
}
}
@@ -1080,15 +1068,9 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
/// > * <https://github.com/influxdata/influxdb_iox/issues/6939>
///
/// [docs]: https://docs.influxdata.com/influxdb/v1.8/query_language/functions/
- fn call_to_df_expr(
- &self,
- ctx: &Context<'_>,
- scope: ExprScope,
- call: &Call,
- schemas: &Schemas,
- ) -> Result<Expr> {
+ fn call_to_df_expr(&self, scope: ExprScope, call: &Call, schemas: &Schemas) -> Result<Expr> {
if is_scalar_math_function(call.name.as_str()) {
- return self.scalar_math_func_to_df_expr(ctx, scope, call, schemas);
+ return self.scalar_math_func_to_df_expr(scope, call, schemas);
}
match scope {
@@ -1100,13 +1082,12 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
error::query(format!("invalid function call in condition: {name}"))
}
}
- ExprScope::Projection => self.function_to_df_expr(ctx, scope, call, schemas),
+ ExprScope::Projection => self.function_to_df_expr(scope, call, schemas),
}
}
fn function_to_df_expr(
&self,
- ctx: &Context<'_>,
scope: ExprScope,
call: &Call,
schemas: &Schemas,
@@ -1128,13 +1109,13 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
// The DISTINCT function is handled as a `ProjectionType::RawDistinct`
// query, so the planner only needs to project the single column
// argument.
- "distinct" => self.expr_to_df_expr(ctx, scope, &args[0], schemas),
+ "distinct" => self.expr_to_df_expr(scope, &args[0], schemas),
"count" => {
let (expr, distinct) = match &args[0] {
IQLExpr::Call(c) if c.name == "distinct" => {
- (self.expr_to_df_expr(ctx, scope, &c.args[0], schemas)?, true)
+ (self.expr_to_df_expr(scope, &c.args[0], schemas)?, true)
}
- expr => (self.expr_to_df_expr(ctx, scope, expr, schemas)?, false),
+ expr => (self.expr_to_df_expr(scope, expr, schemas)?, false),
};
if let Expr::Literal(ScalarValue::Null) = expr {
return Ok(expr);
@@ -1150,7 +1131,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
)))
}
"sum" | "stddev" | "mean" | "median" => {
- let expr = self.expr_to_df_expr(ctx, scope, &args[0], schemas)?;
+ let expr = self.expr_to_df_expr(scope, &args[0], schemas)?;
if let Expr::Literal(ScalarValue::Null) = expr {
return Ok(expr);
}
@@ -1165,7 +1146,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
)))
}
name @ ("first" | "last" | "min" | "max") => {
- let expr = self.expr_to_df_expr(ctx, scope, &args[0], schemas)?;
+ let expr = self.expr_to_df_expr(scope, &args[0], schemas)?;
if let Expr::Literal(ScalarValue::Null) = expr {
return Ok(expr);
}
@@ -1191,7 +1172,6 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
/// Map the InfluxQL scalar function call to a DataFusion scalar function expression.
fn scalar_math_func_to_df_expr(
&self,
- ctx: &Context<'_>,
scope: ExprScope,
call: &Call,
schemas: &Schemas,
@@ -1199,7 +1179,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
let args = call
.args
.iter()
- .map(|e| self.expr_to_df_expr(ctx, scope, e, schemas))
+ .map(|e| self.expr_to_df_expr(scope, e, schemas))
.collect::<Result<Vec<Expr>>>()?;
match BuiltinScalarFunction::from_str(call.name.as_str())? {
@@ -1220,68 +1200,85 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
/// Map an InfluxQL arithmetic expression to a DataFusion [`Expr`].
fn arithmetic_expr_to_df_expr(
&self,
- ctx: &Context<'_>,
scope: ExprScope,
expr: &Binary,
schemas: &Schemas,
) -> Result<Expr> {
Ok(binary_expr(
- self.expr_to_df_expr(ctx, scope, &expr.lhs, schemas)?,
+ self.expr_to_df_expr(scope, &expr.lhs, schemas)?,
binary_operator_to_df_operator(expr.op),
- self.expr_to_df_expr(ctx, scope, &expr.rhs, schemas)?,
+ self.expr_to_df_expr(scope, &expr.rhs, schemas)?,
))
}
+ fn plan_condition_time_range(
+ &self,
+ condition: Option<&ConditionalExpression>,
+ time_range: Option<TimeRange>,
+ plan: LogicalPlan,
+ schemas: &Schemas,
+ ds_schema: &DataSourceSchema<'_>,
+ ) -> Result<LogicalPlan> {
+ let filter_expr = condition
+ .map(|condition| {
+ let filter_expr = self.conditional_to_df_expr(condition, schemas)?;
+ planner_rewrite_expression::rewrite_conditional_expr(
+ self.s.execution_props(),
+ filter_expr,
+ schemas,
+ ds_schema,
+ )
+ })
+ .transpose()?;
+
+ let time_expr = time_range.map(time_range_to_df_expr).flatten();
+
+ let pb = LogicalPlanBuilder::from(plan);
+ match (time_expr, filter_expr) {
+ (Some(lhs), Some(rhs)) => pb.filter(lhs.and(rhs))?,
+ (Some(expr), None) | (None, Some(expr)) => pb.filter(expr)?,
+ (None, None) => pb,
+ }
+ .build()
+ }
+
/// Generate a logical plan that filters the existing plan based on the
- /// optional InfluxQL conditional expression.
+ /// InfluxQL [`WhereClause`] of a `SHOW` statement.
fn plan_where_clause(
&self,
- ctx: &Context<'_>,
- condition: &Option<WhereClause>,
plan: LogicalPlan,
+ condition: &Option<WhereClause>,
+ cutoff: MetadataCutoff,
schemas: &Schemas,
ds_schema: &DataSourceSchema<'_>,
) -> Result<LogicalPlan> {
- match condition {
- Some(where_clause) => {
- let rc = ReduceContext {
- now: Some(Timestamp::from(
- self.s.execution_props().query_execution_start_time,
- )),
- tz: ctx.tz,
- };
-
- let (cond, time_range) =
- split_cond(&rc, where_clause).map_err(error::map::expr_error)?;
-
- let filter_expr = if let Some(cond) = cond {
- let filter_expr = self.conditional_to_df_expr(ctx, &cond, schemas)?;
- Some(planner_rewrite_expression::rewrite_conditional_expr(
- self.s.execution_props(),
- filter_expr,
- schemas,
- ds_schema,
- )?)
- } else {
- None
- };
+ let start_time = Timestamp::from(self.s.execution_props().query_execution_start_time);
- let time_expr = if let Some(cond) = time_range {
- time_range_to_df_expr(cond)
- } else {
- None
+ let (cond, time_range) = condition
+ .as_ref()
+ .map(|where_clause| {
+ let rc = ReduceContext {
+ now: Some(start_time),
+ tz: None,
};
- let pb = LogicalPlanBuilder::from(plan);
- match (time_expr, filter_expr) {
- (Some(lhs), Some(rhs)) => pb.filter(lhs.and(rhs))?,
- (Some(expr), None) | (None, Some(expr)) => pb.filter(expr)?,
- (None, None) => pb,
+ split_cond(&rc, where_clause).map_err(error::map::expr_error)
+ })
+ .transpose()?
+ .unwrap_or_default();
+
+ // Add time restriction to logical plan if there isn't any.
+ let time_range = time_range.unwrap_or_else(|| TimeRange {
+ lower: Some(match cutoff {
+ MetadataCutoff::Absolute(dt) => dt.timestamp_nanos(),
+ MetadataCutoff::Relative(delta) => {
+ start_time.timestamp_nanos() - delta.as_nanos() as i64
}
- .build()
- }
- None => Ok(plan),
- }
+ }),
+ upper: None,
+ });
+
+ self.plan_condition_time_range(cond.as_ref(), Some(time_range), plan, schemas, ds_schema)
}
/// Generate a logical plan for the specified `DataSource`.
@@ -1459,13 +1456,12 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
let ds = DataSource::Table(table.clone());
let ds_schema = ds.schema(self.s)?;
let plan = self.plan_where_clause(
- &Context::default(),
- &condition,
plan,
+ &condition,
+ metadata_cutoff,
&schemas,
&ds_schema,
)?;
- let plan = add_time_restriction(plan, metadata_cutoff)?;
let tags = table_schema
.iter()
@@ -1713,13 +1709,12 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
let ds = DataSource::Table(table.clone());
let ds_schema = ds.schema(self.s)?;
let plan = self.plan_where_clause(
- &Context::default(),
- &show_tag_values.condition,
plan,
+ &show_tag_values.condition,
+ metadata_cutoff,
&schemas,
&ds_schema,
)?;
- let plan = add_time_restriction(plan, metadata_cutoff)?;
for key in keys {
let idx = plan
@@ -1820,13 +1815,12 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
let ds = DataSource::Table(table.clone());
let ds_schema = ds.schema(self.s)?;
let plan = self.plan_where_clause(
- &Context::default(),
- &condition,
plan,
+ &condition,
+ metadata_cutoff,
&schemas,
&ds_schema,
)?;
- let plan = add_time_restriction(plan, metadata_cutoff)?;
let plan = LogicalPlanBuilder::from(plan)
.limit(0, Some(1))?
@@ -2237,37 +2231,6 @@ fn eval_with_key_clause<'a>(
}
}
-/// Add time restriction to logical plan if there isn't any.
-///
-/// This must used directly on top of a potential filter plan, e.g. the one produced by [`plan_where_clause`](InfluxQLToLogicalPlan::plan_where_clause).
-fn add_time_restriction(plan: LogicalPlan, cutoff: MetadataCutoff) -> Result<LogicalPlan> {
- let contains_time = if let LogicalPlan::Filter(filter) = &plan {
- let cols = filter.predicate.to_columns()?;
- cols.into_iter().any(|col| col.name == "time")
- } else {
- false
- };
-
- if contains_time {
- Ok(plan)
- } else {
- let cutoff_expr = match cutoff {
- MetadataCutoff::Absolute(dt) => lit_timestamp_nano(dt.timestamp_nanos()),
- MetadataCutoff::Relative(delta) => binary_expr(
- now(),
- Operator::Minus,
- lit(ScalarValue::IntervalMonthDayNano(Some(
- i128::try_from(delta.as_nanos())
- .map_err(|_| error::map::query("default timespan overflow"))?,
- ))),
- ),
- };
- LogicalPlanBuilder::from(plan)
- .filter(col("time").gt_eq(cutoff_expr))?
- .build()
- }
-}
-
/// Find distinct occurrences of `Expr::VarRef` expressions for
/// the `select`.
fn find_var_refs(select: &Select) -> BTreeSet<&VarRef> {
@@ -2477,54 +2440,44 @@ mod test {
Union [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)]
Projection: Dictionary(Int32, Utf8("measurements")) AS iox::measurement, Dictionary(Int32, Utf8("all_types")) AS name [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)]
Limit: skip=0, fetch=1 [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
- Filter: all_types.time >= now() - IntervalMonthDayNano("86400000000000") [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
- Filter: Boolean(false) [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
- TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
+ Filter: all_types.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
+ TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
Projection: Dictionary(Int32, Utf8("measurements")) AS iox::measurement, Dictionary(Int32, Utf8("cpu")) AS name [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)]
Limit: skip=0, fetch=1 [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
- Filter: cpu.time >= now() - IntervalMonthDayNano("86400000000000") [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
- Filter: Boolean(false) [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
- TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
+ Filter: cpu.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
+ TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
Projection: Dictionary(Int32, Utf8("measurements")) AS iox::measurement, Dictionary(Int32, Utf8("data")) AS name [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)]
Limit: skip=0, fetch=1 [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
- Filter: data.time >= now() - IntervalMonthDayNano("86400000000000") [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
- Filter: data.foo = Dictionary(Int32, Utf8("some_foo")) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
- TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
+ Filter: data.time >= TimestampNanosecond(1672444800000000000, None) AND data.foo = Dictionary(Int32, Utf8("some_foo")) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
+ TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
Projection: Dictionary(Int32, Utf8("measurements")) AS iox::measurement, Dictionary(Int32, Utf8("disk")) AS name [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)]
Limit: skip=0, fetch=1 [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- Filter: disk.time >= now() - IntervalMonthDayNano("86400000000000") [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- Filter: Boolean(false) [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- TableScan: disk [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
+ Filter: disk.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
+ TableScan: disk [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
Projection: Dictionary(Int32, Utf8("measurements")) AS iox::measurement, Dictionary(Int32, Utf8("diskio")) AS name [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)]
Limit: skip=0, fetch=1 [bytes_read:Int64;N, bytes_written:Int64;N, host:Dictionary(Int32, Utf8);N, is_local:Boolean;N, read_utilization:Float64;N, region:Dictionary(Int32, Utf8);N, status:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), write_utilization:Float64;N]
- Filter: diskio.time >= now() - IntervalMonthDayNano("86400000000000") [bytes_read:Int64;N, bytes_written:Int64;N, host:Dictionary(Int32, Utf8);N, is_local:Boolean;N, read_utilization:Float64;N, region:Dictionary(Int32, Utf8);N, status:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), write_utilization:Float64;N]
- Filter: Boolean(false) [bytes_read:Int64;N, bytes_written:Int64;N, host:Dictionary(Int32, Utf8);N, is_local:Boolean;N, read_utilization:Float64;N, region:Dictionary(Int32, Utf8);N, status:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), write_utilization:Float64;N]
- TableScan: diskio [bytes_read:Int64;N, bytes_written:Int64;N, host:Dictionary(Int32, Utf8);N, is_local:Boolean;N, read_utilization:Float64;N, region:Dictionary(Int32, Utf8);N, status:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), write_utilization:Float64;N]
+ Filter: diskio.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [bytes_read:Int64;N, bytes_written:Int64;N, host:Dictionary(Int32, Utf8);N, is_local:Boolean;N, read_utilization:Float64;N, region:Dictionary(Int32, Utf8);N, status:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), write_utilization:Float64;N]
+ TableScan: diskio [bytes_read:Int64;N, bytes_written:Int64;N, host:Dictionary(Int32, Utf8);N, is_local:Boolean;N, read_utilization:Float64;N, region:Dictionary(Int32, Utf8);N, status:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), write_utilization:Float64;N]
Projection: Dictionary(Int32, Utf8("measurements")) AS iox::measurement, Dictionary(Int32, Utf8("merge_00")) AS name [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)]
Limit: skip=0, fetch=1 [col0:Dictionary(Int32, Utf8);N, col1:Float64;N, col2:Boolean;N, col3:Utf8;N, time:Timestamp(Nanosecond, None)]
- Filter: merge_00.time >= now() - IntervalMonthDayNano("86400000000000") [col0:Dictionary(Int32, Utf8);N, col1:Float64;N, col2:Boolean;N, col3:Utf8;N, time:Timestamp(Nanosecond, None)]
- Filter: Boolean(false) [col0:Dictionary(Int32, Utf8);N, col1:Float64;N, col2:Boolean;N, col3:Utf8;N, time:Timestamp(Nanosecond, None)]
- TableScan: merge_00 [col0:Dictionary(Int32, Utf8);N, col1:Float64;N, col2:Boolean;N, col3:Utf8;N, time:Timestamp(Nanosecond, None)]
+ Filter: merge_00.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [col0:Dictionary(Int32, Utf8);N, col1:Float64;N, col2:Boolean;N, col3:Utf8;N, time:Timestamp(Nanosecond, None)]
+ TableScan: merge_00 [col0:Dictionary(Int32, Utf8);N, col1:Float64;N, col2:Boolean;N, col3:Utf8;N, time:Timestamp(Nanosecond, None)]
Projection: Dictionary(Int32, Utf8("measurements")) AS iox::measurement, Dictionary(Int32, Utf8("merge_01")) AS name [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)]
Limit: skip=0, fetch=1 [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)]
- Filter: merge_01.time >= now() - IntervalMonthDayNano("86400000000000") [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)]
- Filter: Boolean(false) [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)]
- TableScan: merge_01 [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)]
+ Filter: merge_01.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)]
+ TableScan: merge_01 [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)]
Projection: Dictionary(Int32, Utf8("measurements")) AS iox::measurement, Dictionary(Int32, Utf8("temp_01")) AS name [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)]
Limit: skip=0, fetch=1 [field_f64:Float64;N, field_i64:Int64;N, field_str:Utf8;N, field_u64:UInt64;N, shared_field0:Float64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- Filter: temp_01.time >= now() - IntervalMonthDayNano("86400000000000") [field_f64:Float64;N, field_i64:Int64;N, field_str:Utf8;N, field_u64:UInt64;N, shared_field0:Float64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- Filter: Boolean(false) [field_f64:Float64;N, field_i64:Int64;N, field_str:Utf8;N, field_u64:UInt64;N, shared_field0:Float64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- TableScan: temp_01 [field_f64:Float64;N, field_i64:Int64;N, field_str:Utf8;N, field_u64:UInt64;N, shared_field0:Float64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
+ Filter: temp_01.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [field_f64:Float64;N, field_i64:Int64;N, field_str:Utf8;N, field_u64:UInt64;N, shared_field0:Float64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
+ TableScan: temp_01 [field_f64:Float64;N, field_i64:Int64;N, field_str:Utf8;N, field_u64:UInt64;N, shared_field0:Float64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
Projection: Dictionary(Int32, Utf8("measurements")) AS iox::measurement, Dictionary(Int32, Utf8("temp_02")) AS name [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)]
Limit: skip=0, fetch=1 [shared_field0:Int64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- Filter: temp_02.time >= now() - IntervalMonthDayNano("86400000000000") [shared_field0:Int64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- Filter: Boolean(false) [shared_field0:Int64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- TableScan: temp_02 [shared_field0:Int64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
+ Filter: temp_02.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [shared_field0:Int64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
+ TableScan: temp_02 [shared_field0:Int64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
Projection: Dictionary(Int32, Utf8("measurements")) AS iox::measurement, Dictionary(Int32, Utf8("temp_03")) AS name [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)]
Limit: skip=0, fetch=1 [shared_field0:Utf8;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- Filter: temp_03.time >= now() - IntervalMonthDayNano("86400000000000") [shared_field0:Utf8;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- Filter: Boolean(false) [shared_field0:Utf8;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- TableScan: temp_03 [shared_field0:Utf8;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
+ Filter: temp_03.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [shared_field0:Utf8;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
+ TableScan: temp_03 [shared_field0:Utf8;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
"###);
assert_snapshot!(plan("SHOW MEASUREMENTS WHERE time > 1337"), @r###"
Sort: iox::measurement ASC NULLS LAST, name ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)]
@@ -2590,81 +2543,71 @@ mod test {
Unnest: tagKey [tagKey:Utf8;N]
Projection: makearray(CASE WHEN tag0 > Int32(0) THEN Utf8("tag0") END, CASE WHEN tag1 > Int32(0) THEN Utf8("tag1") END) AS tagKey [tagKey:FixedSizeList(Field { name: "item", data_type: Utf8, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, 2);N]
Aggregate: groupBy=[[]], aggr=[[SUM(CAST(all_types.tag0 IS NOT NULL AS UInt64)) AS tag0, SUM(CAST(all_types.tag1 IS NOT NULL AS UInt64)) AS tag1]] [tag0:UInt64;N, tag1:UInt64;N]
- Filter: all_types.time >= now() - IntervalMonthDayNano("86400000000000") [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
- Filter: Boolean(false) [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
- TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
+ Filter: all_types.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
+ TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N]
Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, tagKey [iox::measurement:Dictionary(Int32, Utf8), tagKey:Utf8;N]
Filter: tagKey IS NOT NULL [tagKey:Utf8;N]
Unnest: tagKey [tagKey:Utf8;N]
Projection: makearray(CASE WHEN cpu > Int32(0) THEN Utf8("cpu") END, CASE WHEN host > Int32(0) THEN Utf8("host") END, CASE WHEN region > Int32(0) THEN Utf8("region") END) AS tagKey [tagKey:FixedSizeList(Field { name: "item", data_type: Utf8, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, 3);N]
Aggregate: groupBy=[[]], aggr=[[SUM(CAST(cpu.cpu IS NOT NULL AS UInt64)) AS cpu, SUM(CAST(cpu.host IS NOT NULL AS UInt64)) AS host, SUM(CAST(cpu.region IS NOT NULL AS UInt64)) AS region]] [cpu:UInt64;N, host:UInt64;N, region:UInt64;N]
- Filter: cpu.time >= now() - IntervalMonthDayNano("86400000000000") [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
- Filter: Boolean(false) [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
- TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
+ Filter: cpu.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
+ TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N]
Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, tagKey [iox::measurement:Dictionary(Int32, Utf8), tagKey:Utf8;N]
Filter: tagKey IS NOT NULL [tagKey:Utf8;N]
Unnest: tagKey [tagKey:Utf8;N]
Projection: makearray(CASE WHEN bar > Int32(0) THEN Utf8("bar") END, CASE WHEN foo > Int32(0) THEN Utf8("foo") END) AS tagKey [tagKey:FixedSizeList(Field { name: "item", data_type: Utf8, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, 2);N]
Aggregate: groupBy=[[]], aggr=[[SUM(CAST(data.bar IS NOT NULL AS UInt64)) AS bar, SUM(CAST(data.foo IS NOT NULL AS UInt64)) AS foo]] [bar:UInt64;N, foo:UInt64;N]
- Filter: data.time >= now() - IntervalMonthDayNano("86400000000000") [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
- Filter: data.foo = Dictionary(Int32, Utf8("some_foo")) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
- TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
+ Filter: data.time >= TimestampNanosecond(1672444800000000000, None) AND data.foo = Dictionary(Int32, Utf8("some_foo")) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
+ TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
Projection: Dictionary(Int32, Utf8("disk")) AS iox::measurement, tagKey [iox::measurement:Dictionary(Int32, Utf8), tagKey:Utf8;N]
Filter: tagKey IS NOT NULL [tagKey:Utf8;N]
Unnest: tagKey [tagKey:Utf8;N]
Projection: makearray(CASE WHEN device > Int32(0) THEN Utf8("device") END, CASE WHEN host > Int32(0) THEN Utf8("host") END, CASE WHEN region > Int32(0) THEN Utf8("region") END) AS tagKey [tagKey:FixedSizeList(Field { name: "item", data_type: Utf8, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, 3);N]
Aggregate: groupBy=[[]], aggr=[[SUM(CAST(disk.device IS NOT NULL AS UInt64)) AS device, SUM(CAST(disk.host IS NOT NULL AS UInt64)) AS host, SUM(CAST(disk.region IS NOT NULL AS UInt64)) AS region]] [device:UInt64;N, host:UInt64;N, region:UInt64;N]
- Filter: disk.time >= now() - IntervalMonthDayNano("86400000000000") [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- Filter: Boolean(false) [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- TableScan: disk [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
+ Filter: disk.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
+ TableScan: disk [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
Projection: Dictionary(Int32, Utf8("diskio")) AS iox::measurement, tagKey [iox::measurement:Dictionary(Int32, Utf8), tagKey:Utf8;N]
Filter: tagKey IS NOT NULL [tagKey:Utf8;N]
Unnest: tagKey [tagKey:Utf8;N]
Projection: makearray(CASE WHEN host > Int32(0) THEN Utf8("host") END, CASE WHEN region > Int32(0) THEN Utf8("region") END, CASE WHEN status > Int32(0) THEN Utf8("status") END) AS tagKey [tagKey:FixedSizeList(Field { name: "item", data_type: Utf8, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, 3);N]
Aggregate: groupBy=[[]], aggr=[[SUM(CAST(diskio.host IS NOT NULL AS UInt64)) AS host, SUM(CAST(diskio.region IS NOT NULL AS UInt64)) AS region, SUM(CAST(diskio.status IS NOT NULL AS UInt64)) AS status]] [host:UInt64;N, region:UInt64;N, status:UInt64;N]
- Filter: diskio.time >= now() - IntervalMonthDayNano("86400000000000") [bytes_read:Int64;N, bytes_written:Int64;N, host:Dictionary(Int32, Utf8);N, is_local:Boolean;N, read_utilization:Float64;N, region:Dictionary(Int32, Utf8);N, status:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), write_utilization:Float64;N]
- Filter: Boolean(false) [bytes_read:Int64;N, bytes_written:Int64;N, host:Dictionary(Int32, Utf8);N, is_local:Boolean;N, read_utilization:Float64;N, region:Dictionary(Int32, Utf8);N, status:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), write_utilization:Float64;N]
- TableScan: diskio [bytes_read:Int64;N, bytes_written:Int64;N, host:Dictionary(Int32, Utf8);N, is_local:Boolean;N, read_utilization:Float64;N, region:Dictionary(Int32, Utf8);N, status:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), write_utilization:Float64;N]
+ Filter: diskio.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [bytes_read:Int64;N, bytes_written:Int64;N, host:Dictionary(Int32, Utf8);N, is_local:Boolean;N, read_utilization:Float64;N, region:Dictionary(Int32, Utf8);N, status:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), write_utilization:Float64;N]
+ TableScan: diskio [bytes_read:Int64;N, bytes_written:Int64;N, host:Dictionary(Int32, Utf8);N, is_local:Boolean;N, read_utilization:Float64;N, region:Dictionary(Int32, Utf8);N, status:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), write_utilization:Float64;N]
Projection: Dictionary(Int32, Utf8("merge_00")) AS iox::measurement, tagKey [iox::measurement:Dictionary(Int32, Utf8), tagKey:Utf8;N]
Filter: tagKey IS NOT NULL [tagKey:Utf8;N]
Unnest: tagKey [tagKey:Utf8;N]
Projection: makearray(CASE WHEN col0 > Int32(0) THEN Utf8("col0") END) AS tagKey [tagKey:FixedSizeList(Field { name: "item", data_type: Utf8, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, 1);N]
Aggregate: groupBy=[[]], aggr=[[SUM(CAST(merge_00.col0 IS NOT NULL AS UInt64)) AS col0]] [col0:UInt64;N]
- Filter: merge_00.time >= now() - IntervalMonthDayNano("86400000000000") [col0:Dictionary(Int32, Utf8);N, col1:Float64;N, col2:Boolean;N, col3:Utf8;N, time:Timestamp(Nanosecond, None)]
- Filter: Boolean(false) [col0:Dictionary(Int32, Utf8);N, col1:Float64;N, col2:Boolean;N, col3:Utf8;N, time:Timestamp(Nanosecond, None)]
- TableScan: merge_00 [col0:Dictionary(Int32, Utf8);N, col1:Float64;N, col2:Boolean;N, col3:Utf8;N, time:Timestamp(Nanosecond, None)]
+ Filter: merge_00.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [col0:Dictionary(Int32, Utf8);N, col1:Float64;N, col2:Boolean;N, col3:Utf8;N, time:Timestamp(Nanosecond, None)]
+ TableScan: merge_00 [col0:Dictionary(Int32, Utf8);N, col1:Float64;N, col2:Boolean;N, col3:Utf8;N, time:Timestamp(Nanosecond, None)]
Projection: Dictionary(Int32, Utf8("merge_01")) AS iox::measurement, tagKey [iox::measurement:Dictionary(Int32, Utf8), tagKey:Utf8;N]
Filter: tagKey IS NOT NULL [tagKey:Utf8;N]
Unnest: tagKey [tagKey:Utf8;N]
Projection: makearray(CASE WHEN col1 > Int32(0) THEN Utf8("col1") END) AS tagKey [tagKey:FixedSizeList(Field { name: "item", data_type: Utf8, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, 1);N]
Aggregate: groupBy=[[]], aggr=[[SUM(CAST(merge_01.col1 IS NOT NULL AS UInt64)) AS col1]] [col1:UInt64;N]
- Filter: merge_01.time >= now() - IntervalMonthDayNano("86400000000000") [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)]
- Filter: Boolean(false) [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)]
- TableScan: merge_01 [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)]
+ Filter: merge_01.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)]
+ TableScan: merge_01 [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)]
Projection: Dictionary(Int32, Utf8("temp_01")) AS iox::measurement, tagKey [iox::measurement:Dictionary(Int32, Utf8), tagKey:Utf8;N]
Filter: tagKey IS NOT NULL [tagKey:Utf8;N]
Unnest: tagKey [tagKey:Utf8;N]
Projection: makearray(CASE WHEN shared_tag0 > Int32(0) THEN Utf8("shared_tag0") END, CASE WHEN shared_tag1 > Int32(0) THEN Utf8("shared_tag1") END) AS tagKey [tagKey:FixedSizeList(Field { name: "item", data_type: Utf8, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, 2);N]
Aggregate: groupBy=[[]], aggr=[[SUM(CAST(temp_01.shared_tag0 IS NOT NULL AS UInt64)) AS shared_tag0, SUM(CAST(temp_01.shared_tag1 IS NOT NULL AS UInt64)) AS shared_tag1]] [shared_tag0:UInt64;N, shared_tag1:UInt64;N]
- Filter: temp_01.time >= now() - IntervalMonthDayNano("86400000000000") [field_f64:Float64;N, field_i64:Int64;N, field_str:Utf8;N, field_u64:UInt64;N, shared_field0:Float64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- Filter: Boolean(false) [field_f64:Float64;N, field_i64:Int64;N, field_str:Utf8;N, field_u64:UInt64;N, shared_field0:Float64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- TableScan: temp_01 [field_f64:Float64;N, field_i64:Int64;N, field_str:Utf8;N, field_u64:UInt64;N, shared_field0:Float64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
+ Filter: temp_01.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [field_f64:Float64;N, field_i64:Int64;N, field_str:Utf8;N, field_u64:UInt64;N, shared_field0:Float64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
+ TableScan: temp_01 [field_f64:Float64;N, field_i64:Int64;N, field_str:Utf8;N, field_u64:UInt64;N, shared_field0:Float64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
Projection: Dictionary(Int32, Utf8("temp_02")) AS iox::measurement, tagKey [iox::measurement:Dictionary(Int32, Utf8), tagKey:Utf8;N]
Filter: tagKey IS NOT NULL [tagKey:Utf8;N]
Unnest: tagKey [tagKey:Utf8;N]
Projection: makearray(CASE WHEN shared_tag0 > Int32(0) THEN Utf8("shared_tag0") END, CASE WHEN shared_tag1 > Int32(0) THEN Utf8("shared_tag1") END) AS tagKey [tagKey:FixedSizeList(Field { name: "item", data_type: Utf8, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, 2);N]
Aggregate: groupBy=[[]], aggr=[[SUM(CAST(temp_02.shared_tag0 IS NOT NULL AS UInt64)) AS shared_tag0, SUM(CAST(temp_02.shared_tag1 IS NOT NULL AS UInt64)) AS shared_tag1]] [shared_tag0:UInt64;N, shared_tag1:UInt64;N]
- Filter: temp_02.time >= now() - IntervalMonthDayNano("86400000000000") [shared_field0:Int64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- Filter: Boolean(false) [shared_field0:Int64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- TableScan: temp_02 [shared_field0:Int64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
+ Filter: temp_02.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [shared_field0:Int64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
+ TableScan: temp_02 [shared_field0:Int64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
Projection: Dictionary(Int32, Utf8("temp_03")) AS iox::measurement, tagKey [iox::measurement:Dictionary(Int32, Utf8), tagKey:Utf8;N]
Filter: tagKey IS NOT NULL [tagKey:Utf8;N]
Unnest: tagKey [tagKey:Utf8;N]
Projection: makearray(CASE WHEN shared_tag0 > Int32(0) THEN Utf8("shared_tag0") END, CASE WHEN shared_tag1 > Int32(0) THEN Utf8("shared_tag1") END) AS tagKey [tagKey:FixedSizeList(Field { name: "item", data_type: Utf8, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, 2);N]
Aggregate: groupBy=[[]], aggr=[[SUM(CAST(temp_03.shared_tag0 IS NOT NULL AS UInt64)) AS shared_tag0, SUM(CAST(temp_03.shared_tag1 IS NOT NULL AS UInt64)) AS shared_tag1]] [shared_tag0:UInt64;N, shared_tag1:UInt64;N]
- Filter: temp_03.time >= now() - IntervalMonthDayNano("86400000000000") [shared_field0:Utf8;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- Filter: Boolean(false) [shared_field0:Utf8;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
- TableScan: temp_03 [shared_field0:Utf8;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
+ Filter: temp_03.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [shared_field0:Utf8;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
+ TableScan: temp_03 [shared_field0:Utf8;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)]
"###);
assert_snapshot!(plan("SHOW TAG KEYS WHERE time > 1337"), @r###"
Sort: iox::measurement ASC NULLS LAST, tagKey ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), tagKey:Utf8;N]
@@ -2749,7 +2692,7 @@ mod test {
Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, Dictionary(Int32, Utf8("bar")) AS key, data.bar AS value [iox::measurement:Dictionary(Int32, Utf8), key:Dictionary(Int32, Utf8), value:Dictionary(Int32, Utf8);N]
Distinct: [bar:Dictionary(Int32, Utf8);N]
Projection: data.bar [bar:Dictionary(Int32, Utf8);N]
- Filter: data.time >= now() - IntervalMonthDayNano("86400000000000") [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
+ Filter: data.time >= TimestampNanosecond(1672444800000000000, None) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
assert_snapshot!(plan("SHOW TAG VALUES WITH KEY = bar LIMIT 1 OFFSET 2"), @r###"
@@ -2761,7 +2704,7 @@ mod test {
Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, Dictionary(Int32, Utf8("bar")) AS key, data.bar AS value [iox::measurement:Dictionary(Int32, Utf8), key:Dictionary(Int32, Utf8), value:Dictionary(Int32, Utf8);N]
Distinct: [bar:Dictionary(Int32, Utf8);N]
Projection: data.bar [bar:Dictionary(Int32, Utf8);N]
- Filter: data.time >= now() - IntervalMonthDayNano("86400000000000") [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
+ Filter: data.time >= TimestampNanosecond(1672444800000000000, None) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
assert_snapshot!(plan("SHOW TAG VALUES WITH KEY = bar WHERE foo = 'some_foo'"), @r###"
@@ -2769,9 +2712,8 @@ mod test {
Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, Dictionary(Int32, Utf8("bar")) AS key, data.bar AS value [iox::measurement:Dictionary(Int32, Utf8), key:Dictionary(Int32, Utf8), value:Dictionary(Int32, Utf8);N]
Distinct: [bar:Dictionary(Int32, Utf8);N]
Projection: data.bar [bar:Dictionary(Int32, Utf8);N]
- Filter: data.time >= now() - IntervalMonthDayNano("86400000000000") [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
- Filter: data.foo = Dictionary(Int32, Utf8("some_foo")) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
- TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
+ Filter: data.time >= TimestampNanosecond(1672444800000000000, None) AND data.foo = Dictionary(Int32, Utf8("some_foo")) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
+ TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
assert_snapshot!(plan("SHOW TAG VALUES WITH KEY = bar WHERE time > 1337"), @r###"
Sort: iox::measurement ASC NULLS LAST, key ASC NULLS LAST, value ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), key:Dictionary(Int32, Utf8), value:Dictionary(Int32, Utf8);N]
diff --git a/iox_query_influxql/src/plan/rewriter.rs b/iox_query_influxql/src/plan/rewriter.rs
index ac85fa58b0..438e9d385d 100644
--- a/iox_query_influxql/src/plan/rewriter.rs
+++ b/iox_query_influxql/src/plan/rewriter.rs
@@ -19,6 +19,8 @@ use influxdb_influxql_parser::select::{
Dimension, FillClause, FromMeasurementClause, GroupByClause, MeasurementSelection,
SelectStatement,
};
+use influxdb_influxql_parser::time_range::{split_cond, ReduceContext};
+use influxdb_influxql_parser::timestamp::Timestamp;
use itertools::Itertools;
use schema::InfluxColumnType;
use std::collections::{BTreeSet, HashMap, HashSet};
@@ -98,6 +100,19 @@ impl RewriteSelect {
let (fields, group_by) = self.expand_projection(s, stmt, &from, &tag_set)?;
let condition = self.condition_resolve_types(s, stmt, &from)?;
+ let (condition, time_range) = match condition {
+ Some(where_clause) => {
+ let rc = ReduceContext {
+ now: Some(Timestamp::from(
+ s.execution_props().query_execution_start_time,
+ )),
+ tz: stmt.timezone.map(|tz| *tz),
+ };
+ split_cond(&rc, &where_clause).map_err(error::map::expr_error)?
+ }
+ None => (None, None),
+ };
+
let SelectStatementInfo { projection_type } =
select_statement_info(&fields, &group_by, stmt.fill)?;
@@ -119,6 +134,7 @@ impl RewriteSelect {
fields,
from,
condition,
+ time_range,
group_by,
tag_set,
fill,
|
c2de92afd2cac0ab9510822b02cd799f79302a77
|
wiedld
|
2023-06-21 09:47:28
|
remove any callback or Action handling from InstrumentedDiskProtection.
|
The callback and Action handling will be implemented later, in stages.
| null |
refactor: remove any callback or Action handling from InstrumentedDiskProtection.
The callback and Action handling will be implemented later, in stages.
|
diff --git a/tracker/src/disk_protection.rs b/tracker/src/disk_protection.rs
index 547b78e23e..21bf8b27a2 100644
--- a/tracker/src/disk_protection.rs
+++ b/tracker/src/disk_protection.rs
@@ -45,40 +45,6 @@ impl DiskProtectionMetrics {
}
}
-/// Protective action taken, per each cycle of background task
-struct DiskProtectionAction {
- /// Function used to check if action should be triggered.
- trigger: Box<dyn FnMut(u64, DiskProtectionState) -> bool + Send + Sync>,
- /// Callback action taken.
- callback: Option<Box<dyn FnMut() + Send + Sync>>,
- /// Next state.
- next_state: DiskProtectionState,
-}
-
-impl DiskProtectionAction {
- /// Perform the protection action as per the [`DiskProtectionAction`] contract.
- pub(crate) fn check_trigger(&mut self, measured: u64, curr_state: &Mutex<DiskProtectionState>) {
- let mut curr_state = curr_state.lock();
- match (&mut self.callback, (self.trigger)(measured, *curr_state)) {
- (None, _) => {}
- (Some(_), false) => {}
- (Some(callback), true) => {
- *curr_state = self.next_state;
- callback();
- }
- }
- }
-}
-
-#[derive(Copy, Clone, PartialEq)]
-/// Current state of disk protection.
-enum DiskProtectionState {
- /// DiskProtection has activated by triggering the appropriate calllback.
- Activated,
- /// DiskProtection is not activated, but is still watching (and checking) the metrics.
- Watching,
-}
-
/// Disk Protection instrument.
pub struct InstrumentedDiskProtection {
/// How often to perform the disk protection check.
@@ -87,12 +53,6 @@ pub struct InstrumentedDiskProtection {
metrics: DiskProtectionMetrics,
/// The handle to terminate the background task.
background_task: Mutex<Option<JoinHandle<()>>>,
- /// Current state of disk protection.
- state: Mutex<DiskProtectionState>,
- /// Callback triggered when disk protection is enacted.
- callback_on_protection_begin: tokio::sync::Mutex<DiskProtectionAction>,
- /// Callback triggered when disk protection has ended.
- callback_on_protection_end: tokio::sync::Mutex<DiskProtectionAction>,
}
impl std::fmt::Debug for InstrumentedDiskProtection {
@@ -107,9 +67,6 @@ impl InstrumentedDiskProtection {
registry: &metric::Registry,
attributes: impl Into<Attributes> + Send,
interval_duration: Duration,
- disk_threshold: u64,
- callback_on_protection_begin: Option<Box<dyn FnMut() + Send + Sync>>,
- callback_on_protection_end: Option<Box<dyn FnMut() + Send + Sync>>,
) -> Self {
let metrics = DiskProtectionMetrics::new(registry, attributes);
@@ -117,21 +74,6 @@ impl InstrumentedDiskProtection {
interval_duration,
metrics,
background_task: Default::default(),
- state: Mutex::new(DiskProtectionState::Watching),
- callback_on_protection_begin: tokio::sync::Mutex::new(DiskProtectionAction {
- trigger: Box::new(move |curr_metric: u64, curr_state| {
- curr_metric <= disk_threshold && curr_state == DiskProtectionState::Watching
- }),
- callback: callback_on_protection_begin,
- next_state: DiskProtectionState::Activated,
- }),
- callback_on_protection_end: tokio::sync::Mutex::new(DiskProtectionAction {
- trigger: Box::new(move |curr_metric: u64, curr_state| {
- curr_metric > disk_threshold && curr_state == DiskProtectionState::Activated
- }),
- callback: callback_on_protection_end,
- next_state: DiskProtectionState::Watching,
- }),
}
}
@@ -162,17 +104,7 @@ impl InstrumentedDiskProtection {
system.refresh_all();
- // Protective actions based upon available_disk_percentage.
- let available_disk_percentage =
- self.metrics.measure_available_disk_space_percent(&system);
- self.callback_on_protection_begin
- .lock()
- .await
- .check_trigger(available_disk_percentage, &self.state);
- self.callback_on_protection_end
- .lock()
- .await
- .check_trigger(available_disk_percentage, &self.state);
+ self.metrics.measure_available_disk_space_percent(&system);
}
}
}
@@ -186,8 +118,6 @@ impl Drop for InstrumentedDiskProtection {
#[cfg(test)]
mod tests {
- use std::sync::atomic::{AtomicBool, Ordering};
-
use metric::Metric;
use super::*;
@@ -201,14 +131,8 @@ mod tests {
impl MockAnyStruct {
pub(crate) async fn new(registry: &metric::Registry, duration: Duration) -> Self {
- let disk_protection = InstrumentedDiskProtection::new(
- registry,
- &[("test", "mock")],
- duration,
- 10_u64,
- None,
- None,
- );
+ let disk_protection =
+ InstrumentedDiskProtection::new(registry, &[("test", "mock")], duration);
disk_protection.start().await;
Self
@@ -228,55 +152,4 @@ mod tests {
assert!(recorded_metric > 0_u64);
}
-
- #[tokio::test]
- async fn test_callback_is_triggered() {
- let registry = Arc::new(metric::Registry::new());
- let duration = Duration::from_secs(1);
-
- struct MockStructTrackCallback {
- callback_triggered: AtomicBool,
- }
-
- impl MockStructTrackCallback {
- pub(crate) async fn new(registry: &metric::Registry, duration: Duration) -> Arc<Self> {
- let mock = Arc::new(Self {
- callback_triggered: AtomicBool::new(false),
- });
- let mock_for_cb = Arc::clone(&mock);
-
- let disk_protection = InstrumentedDiskProtection::new(
- registry,
- &[("test", "mock")],
- duration,
- 100_u64,
- Some(Box::new(move || {
- mock_for_cb.callback();
- })),
- None,
- );
- disk_protection.start().await;
-
- mock
- }
-
- fn callback(&self) {
- self.callback_triggered.store(true, Ordering::SeqCst);
- }
- }
-
- let mock = MockStructTrackCallback::new(®istry, duration).await;
-
- tokio::time::sleep(2 * duration).await;
-
- let recorded_metric = registry
- .get_instrument::<Metric<U64Gauge>>("disk_protection_free_disk_space")
- .expect("metric should exist")
- .get_observer(&Attributes::from(&[("test", "mock")]))
- .expect("metric should have labels")
- .fetch();
-
- assert!(recorded_metric > 0_u64);
- assert!(mock.callback_triggered.load(Ordering::SeqCst));
- }
}
|
03ea565802cdcd4814de3dfc776fcdb07c1f987f
|
Trevor Hilton
|
2024-12-27 12:42:30
|
cli arg to specify max parquet fanout (#25714)
|
This allows the `max_parquet_fanout` to be specified in the CLI for the `influxdb3 serve` command. This could be done previously via the `--datafusion-config` CLI argument, but the drawbacks to that were:
1. that is a fairly advanced option given the available key/value pairs are not well documented
2. if `iox.max_parquet_fanout` was not provided to that argument, the default would be set to `40`
This PR maintains the existing `--datafusion-config` CLI argument (with one caveat, see below), which allows users to provide a set of key/value pairs that will be used to build the internal DataFusion config, but in addition provides the `--datafusion-max-parquet-fanout` argument:
```
--datafusion-max-parquet-fanout <MAX_PARQUET_FANOUT>
When multiple parquet files are required in a sorted way (e.g. for de-duplication), we have two options:
1. **In-mem sorting:** Put them into `datafusion.target_partitions` DataFusion partitions. This limits the fan-out, but requires that we potentially chain multiple parquet files into a single DataFusion partition. Since chaining sorted data does NOT automatically result in sorted data (e.g. AB-AB is not sorted), we need to perform an in-memory sort using `SortExec` afterwards. This is expensive.
2. **Fan-out:** Instead of chaining files within DataFusion partitions, we can accept a fan-out beyond `target_partitions`. This prevents in-memory sorting but may result in OOMs (out-of-memory) if the fan-out is too large.
We try to pick option 2 up to a certain number of files, which is configured by this setting.
[env: INFLUXDB3_DATAFUSION_MAX_PARQUET_FANOUT=]
[default: 1000]
```
with the default value of `1000`, which will override the core `iox_query` default of `40`.
A test was added to check that this is propagated down to the `IOxSessionContext` that is used during queries.
The only change to the `datafusion-config` CLI argument was to rename `INFLUXDB_IOX` in the environment variable to `INFLUXDB3`:
```
--datafusion-config <DATAFUSION_CONFIG>
Provide custom configuration to DataFusion as a comma-separated list of key:value pairs.
# Example ```text --datafusion-config "datafusion.key1:value1, datafusion.key2:value2" ```
[env: INFLUXDB3_DATAFUSION_CONFIG=]
[default: ]
```
| null |
feat: cli arg to specify max parquet fanout (#25714)
This allows the `max_parquet_fanout` to be specified in the CLI for the `influxdb3 serve` command. This could be done previously via the `--datafusion-config` CLI argument, but the drawbacks to that were:
1. that is a fairly advanced option given the available key/value pairs are not well documented
2. if `iox.max_parquet_fanout` was not provided to that argument, the default would be set to `40`
This PR maintains the existing `--datafusion-config` CLI argument (with one caveat, see below), which allows users to provide a set of key/value pairs that will be used to build the internal DataFusion config, but in addition provides the `--datafusion-max-parquet-fanout` argument:
```
--datafusion-max-parquet-fanout <MAX_PARQUET_FANOUT>
When multiple parquet files are required in a sorted way (e.g. for de-duplication), we have two options:
1. **In-mem sorting:** Put them into `datafusion.target_partitions` DataFusion partitions. This limits the fan-out, but requires that we potentially chain multiple parquet files into a single DataFusion partition. Since chaining sorted data does NOT automatically result in sorted data (e.g. AB-AB is not sorted), we need to perform an in-memory sort using `SortExec` afterwards. This is expensive.
2. **Fan-out:** Instead of chaining files within DataFusion partitions, we can accept a fan-out beyond `target_partitions`. This prevents in-memory sorting but may result in OOMs (out-of-memory) if the fan-out is too large.
We try to pick option 2 up to a certain number of files, which is configured by this setting.
[env: INFLUXDB3_DATAFUSION_MAX_PARQUET_FANOUT=]
[default: 1000]
```
with the default value of `1000`, which will override the core `iox_query` default of `40`.
A test was added to check that this is propagated down to the `IOxSessionContext` that is used during queries.
The only change to the `datafusion-config` CLI argument was to rename `INFLUXDB_IOX` in the environment variable to `INFLUXDB3`:
```
--datafusion-config <DATAFUSION_CONFIG>
Provide custom configuration to DataFusion as a comma-separated list of key:value pairs.
# Example ```text --datafusion-config "datafusion.key1:value1, datafusion.key2:value2" ```
[env: INFLUXDB3_DATAFUSION_CONFIG=]
[default: ]
```
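To make the precedence rule above concrete, here is a minimal, self-contained Rust sketch: a value supplied as `iox.max_parquet_fanout` via `--datafusion-config` is replaced by the dedicated `--datafusion-max-parquet-fanout` value (or its default of `1000`). This is not the `influxdb3_clap_blocks` implementation shown in the diff below; `build_config` and its parameter names are invented purely for illustration.
```rust
use std::collections::HashMap;

// Minimal sketch (invented names, not repository code) of the merge behaviour:
// the free-form `--datafusion-config` pairs are collected first, then the value
// from `--datafusion-max-parquet-fanout` (default 1000) is inserted, and
// `HashMap::insert` replaces any existing entry for the same key.
fn build_config(
    user_pairs: &[(&str, &str)],
    max_parquet_fanout: usize,
) -> HashMap<String, String> {
    let mut out: HashMap<String, String> = user_pairs
        .iter()
        .map(|(k, v)| (k.to_string(), v.to_string()))
        .collect();
    // The dedicated CLI argument always wins over an `iox.max_parquet_fanout`
    // pair passed via --datafusion-config.
    out.insert(
        "iox.max_parquet_fanout".to_string(),
        max_parquet_fanout.to_string(),
    );
    out
}

fn main() {
    // A user-supplied "iox.max_parquet_fanout:50" is overridden by the default of 1000.
    let cfg = build_config(&[("iox.max_parquet_fanout", "50")], 1000);
    assert_eq!(cfg["iox.max_parquet_fanout"], "1000");
}
```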
|
diff --git a/Cargo.lock b/Cargo.lock
index 5eb65621d9..527a0e6eda 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2943,11 +2943,14 @@ name = "influxdb3_clap_blocks"
version = "0.1.0"
dependencies = [
"clap",
+ "datafusion",
"futures",
"humantime",
+ "iox_query",
"libc",
"observability_deps",
"paste",
+ "test-log",
"tokio",
]
diff --git a/influxdb3/src/commands/serve.rs b/influxdb3/src/commands/serve.rs
index 87f235aab5..3f5f16fd25 100644
--- a/influxdb3/src/commands/serve.rs
+++ b/influxdb3/src/commands/serve.rs
@@ -12,7 +12,7 @@ use influxdb3_cache::{
meta_cache::MetaCacheProvider,
parquet_cache::create_cached_obj_store_and_oracle,
};
-use influxdb3_clap_blocks::tokio::TokioDatafusionConfig;
+use influxdb3_clap_blocks::{datafusion::IoxQueryDatafusionConfig, tokio::TokioDatafusionConfig};
use influxdb3_process::{
build_malloc_conf, setup_metric_registry, INFLUXDB3_GIT_HASH, INFLUXDB3_VERSION, PROCESS_UUID,
};
@@ -36,8 +36,8 @@ use object_store::ObjectStore;
use observability_deps::tracing::*;
use panic_logging::SendPanicsToTracing;
use parquet_file::storage::{ParquetStorage, StorageId};
-use std::{collections::HashMap, path::Path, str::FromStr};
use std::{num::NonZeroUsize, sync::Arc};
+use std::{path::Path, str::FromStr};
use thiserror::Error;
use tokio::net::TcpListener;
use tokio::time::Instant;
@@ -112,6 +112,10 @@ pub struct Config {
#[clap(flatten)]
pub(crate) tokio_datafusion_config: TokioDatafusionConfig,
+ /// iox_query extended DataFusion config
+ #[clap(flatten)]
+ pub(crate) iox_query_datafusion_config: IoxQueryDatafusionConfig,
+
/// Maximum size of HTTP requests.
#[clap(
long = "max-http-request-size",
@@ -152,16 +156,6 @@ pub struct Config {
)]
pub exec_mem_pool_bytes: MemorySize,
- /// DataFusion config.
- #[clap(
- long = "datafusion-config",
- env = "INFLUXDB_IOX_DATAFUSION_CONFIG",
- default_value = "",
- value_parser = parse_datafusion_config,
- action
- )]
- pub datafusion_config: HashMap<String, String>,
-
/// bearer token to be set for requests
#[clap(long = "bearer-token", env = "INFLUXDB3_BEARER_TOKEN", action)]
pub bearer_token: Option<String>,
@@ -514,7 +508,7 @@ pub async fn command(config: Config) -> Result<()> {
write_buffer: Arc::clone(&write_buffer),
exec: Arc::clone(&exec),
metrics: Arc::clone(&metrics),
- datafusion_config: Arc::new(config.datafusion_config),
+ datafusion_config: Arc::new(config.iox_query_datafusion_config.build()),
query_log_size: config.query_log_size,
telemetry_store: Arc::clone(&telemetry_store),
sys_events_store: Arc::clone(&sys_events_store),
@@ -572,34 +566,3 @@ async fn setup_telemetry_store(
)
.await
}
-
-fn parse_datafusion_config(
- s: &str,
-) -> Result<HashMap<String, String>, Box<dyn std::error::Error + Send + Sync + 'static>> {
- let s = s.trim();
- if s.is_empty() {
- return Ok(HashMap::with_capacity(0));
- }
-
- let mut out = HashMap::new();
- for part in s.split(',') {
- let kv = part.trim().splitn(2, ':').collect::<Vec<_>>();
- match kv.as_slice() {
- [key, value] => {
- let key_owned = key.trim().to_owned();
- let value_owned = value.trim().to_owned();
- let existed = out.insert(key_owned, value_owned).is_some();
- if existed {
- return Err(format!("key '{key}' passed multiple times").into());
- }
- }
- _ => {
- return Err(
- format!("Invalid key value pair - expected 'KEY:VALUE' got '{s}'").into(),
- );
- }
- }
- }
-
- Ok(out)
-}
diff --git a/influxdb3_clap_blocks/Cargo.toml b/influxdb3_clap_blocks/Cargo.toml
index 5bdc24393f..25da081dd7 100644
--- a/influxdb3_clap_blocks/Cargo.toml
+++ b/influxdb3_clap_blocks/Cargo.toml
@@ -7,10 +7,12 @@ license.workspace = true
[dependencies]
# core crate dependencies
+iox_query.workspace = true
observability_deps.workspace = true
# crates.io dependencies
clap.workspace = true
+datafusion.workspace = true
humantime.workspace = true
libc.workspace = true
paste.workspace = true
@@ -18,6 +20,7 @@ tokio.workspace = true
[dev-dependencies]
futures.workspace = true
+test-log.workspace = true
[lints]
workspace = true
diff --git a/influxdb3_clap_blocks/src/datafusion.rs b/influxdb3_clap_blocks/src/datafusion.rs
new file mode 100644
index 0000000000..1ccb2e5c87
--- /dev/null
+++ b/influxdb3_clap_blocks/src/datafusion.rs
@@ -0,0 +1,125 @@
+use std::collections::HashMap;
+
+use datafusion::config::ConfigExtension;
+use iox_query::config::IoxConfigExt;
+
+/// Extends the standard [`HashMap`] based DataFusion config option in the CLI with specific
+/// options (along with defaults) for InfluxDB 3 OSS/Pro. This is intended for customization of
+/// options that are defined in the `iox_query` crate, e.g., those defined in [`IoxConfigExt`]
+/// that are relevant to the monolithic versions of InfluxDB 3.
+#[derive(Debug, clap::Parser, Clone)]
+pub struct IoxQueryDatafusionConfig {
+ /// When multiple parquet files are required in a sorted way (e.g. for de-duplication), we have
+ /// two options:
+ ///
+ /// 1. **In-mem sorting:** Put them into `datafusion.target_partitions` DataFusion partitions.
+ /// This limits the fan-out, but requires that we potentially chain multiple parquet files into
+ /// a single DataFusion partition. Since chaining sorted data does NOT automatically result in
+    /// sorted data (e.g. AB-AB is not sorted), we need to perform an in-memory sort using
+ /// `SortExec` afterwards. This is expensive.
+ /// 2. **Fan-out:** Instead of chaining files within DataFusion partitions, we can accept a
+ /// fan-out beyond `target_partitions`. This prevents in-memory sorting but may result in OOMs
+ /// (out-of-memory) if the fan-out is too large.
+ ///
+ /// We try to pick option 2 up to a certain number of files, which is configured by this
+ /// setting.
+ #[clap(
+ long = "datafusion-max-parquet-fanout",
+ env = "INFLUXDB3_DATAFUSION_MAX_PARQUET_FANOUT",
+ default_value = "1000",
+ action
+ )]
+ pub max_parquet_fanout: usize,
+
+ /// Provide custom configuration to DataFusion as a comma-separated list of key:value pairs.
+ ///
+ /// # Example
+ /// ```text
+ /// --datafusion-config "datafusion.key1:value1, datafusion.key2:value2"
+ /// ```
+ #[clap(
+ long = "datafusion-config",
+ env = "INFLUXDB3_DATAFUSION_CONFIG",
+ default_value = "",
+ value_parser = parse_datafusion_config,
+ action
+ )]
+ pub datafusion_config: HashMap<String, String>,
+}
+
+impl IoxQueryDatafusionConfig {
+ /// Build a [`HashMap`] to be used as the DataFusion config for the query executor
+ ///
+ /// This takes the provided `--datafusion-config` and extends it with options available on this
+ /// [`IoxQueryDatafusionConfig`] struct. Note, any IOx extension parameters that are defined
+ /// in the `datafusion_config` will be overridden by the provided values or their default. For
+ /// example, if the user provides:
+ /// ```
+ /// --datafusion-config "iox.max_arquet_fanout:50"
+ /// ```
+ /// This will be overridden with with the default value for `max_parquet_fanout` of `1000`, or
+ /// with the value provided for the `--datafusion-max-parquet-fanout` argument.
+ pub fn build(mut self) -> HashMap<String, String> {
+ self.datafusion_config.insert(
+ format!("{prefix}.max_parquet_fanout", prefix = IoxConfigExt::PREFIX),
+ self.max_parquet_fanout.to_string(),
+ );
+ self.datafusion_config
+ }
+}
+
+fn parse_datafusion_config(
+ s: &str,
+) -> Result<HashMap<String, String>, Box<dyn std::error::Error + Send + Sync + 'static>> {
+ let s = s.trim();
+ if s.is_empty() {
+ return Ok(HashMap::with_capacity(0));
+ }
+
+ let mut out = HashMap::new();
+ for part in s.split(',') {
+ let kv = part.trim().splitn(2, ':').collect::<Vec<_>>();
+ match kv.as_slice() {
+ [key, value] => {
+ let key_owned = key.trim().to_owned();
+ let value_owned = value.trim().to_owned();
+ let existed = out.insert(key_owned, value_owned).is_some();
+ if existed {
+ return Err(format!("key '{key}' passed multiple times").into());
+ }
+ }
+ _ => {
+ return Err(
+ format!("Invalid key value pair - expected 'KEY:VALUE' got '{s}'").into(),
+ );
+ }
+ }
+ }
+
+ Ok(out)
+}
+
+#[cfg(test)]
+mod tests {
+ use clap::Parser;
+ use iox_query::{config::IoxConfigExt, exec::Executor};
+
+ use super::IoxQueryDatafusionConfig;
+
+ #[test_log::test]
+ fn max_parquet_fanout() {
+ let datafusion_config =
+ IoxQueryDatafusionConfig::parse_from(["", "--datafusion-max-parquet-fanout", "5"])
+ .build();
+ let exec = Executor::new_testing();
+ let mut session_config = exec.new_session_config();
+ for (k, v) in &datafusion_config {
+ session_config = session_config.with_config_option(k, v);
+ }
+ let ctx = session_config.build();
+ let inner_ctx = ctx.inner().state();
+ let config = inner_ctx.config();
+ let iox_config_ext = config.options().extensions.get::<IoxConfigExt>().unwrap();
+ assert_eq!(5, iox_config_ext.max_parquet_fanout);
+ }
+}
diff --git a/influxdb3_clap_blocks/src/lib.rs b/influxdb3_clap_blocks/src/lib.rs
index fcd0114403..f8d7844a29 100644
--- a/influxdb3_clap_blocks/src/lib.rs
+++ b/influxdb3_clap_blocks/src/lib.rs
@@ -1,3 +1,4 @@
//! Configuration options for the `influxdb3` CLI which uses the `clap` crate
+pub mod datafusion;
pub mod tokio;
|
9175f4a0b5562fb5c7130af16fa82ae2d60545ab
|
Andrew Lamb
|
2022-12-08 06:27:13
|
Upgrade datafusion to get correct support for multi-part identifiers (#6349)
|
* test: add tests for periods in measurement names
* chore: Update Datafusion
* chore: Update for changed APIs
* chore: Update expected plan output
* chore: Run cargo hakari tasks
|
Co-authored-by: CircleCI[bot] <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
chore: Upgrade datafusion to get correct support for multi-part identifiers (#6349)
* test: add tests for periods in measurement names
* chore: Update Datafusion
* chore: Update for changed APIs
* chore: Update expected plan output
* chore: Run cargo hakari tasks
Co-authored-by: CircleCI[bot] <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
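One of the "changed APIs" this upgrade adapts to (visible throughout the diff below) is DataFusion's switch of the `scan` projection parameter from `&Option<Vec<usize>>` to `Option<&Vec<usize>>`, so callers convert with `as_ref()` and consumers use `cloned()` instead of `clone()`. The following is a small illustrative sketch of that calling-convention change, not code from this repository; the function names are made up.
```rust
// Before the upgrade: borrow the whole Option and clone it.
fn scan_old(projection: &Option<Vec<usize>>) -> Option<Vec<usize>> {
    projection.clone()
}

// After the upgrade: take Option<&Vec<usize>> and clone only the referenced value.
fn scan_new(projection: Option<&Vec<usize>>) -> Option<Vec<usize>> {
    projection.cloned()
}

fn main() {
    let projection = Some(vec![1, 2, 3]);
    // Callers adapt by converting `&Option<T>` into `Option<&T>` with `as_ref()`.
    assert_eq!(scan_old(&projection), scan_new(projection.as_ref()));
}
```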
|
diff --git a/Cargo.lock b/Cargo.lock
index a7e0d28be3..f40a65b5f3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1237,8 +1237,8 @@ dependencies = [
[[package]]
name = "datafusion"
-version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=799dd747152f6574638a844986b8ea8470d3f4d6#799dd747152f6574638a844986b8ea8470d3f4d6"
+version = "15.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=fbadebb894672f61327a30f77cda2ee88a343b2a#fbadebb894672f61327a30f77cda2ee88a343b2a"
dependencies = [
"ahash 0.8.2",
"arrow",
@@ -1270,6 +1270,7 @@ dependencies = [
"pin-project-lite",
"rand",
"smallvec",
+ "sqllogictest",
"sqlparser 0.27.0",
"tempfile",
"tokio",
@@ -1282,8 +1283,8 @@ dependencies = [
[[package]]
name = "datafusion-common"
-version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=799dd747152f6574638a844986b8ea8470d3f4d6#799dd747152f6574638a844986b8ea8470d3f4d6"
+version = "15.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=fbadebb894672f61327a30f77cda2ee88a343b2a#fbadebb894672f61327a30f77cda2ee88a343b2a"
dependencies = [
"arrow",
"chrono",
@@ -1294,8 +1295,8 @@ dependencies = [
[[package]]
name = "datafusion-expr"
-version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=799dd747152f6574638a844986b8ea8470d3f4d6#799dd747152f6574638a844986b8ea8470d3f4d6"
+version = "15.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=fbadebb894672f61327a30f77cda2ee88a343b2a#fbadebb894672f61327a30f77cda2ee88a343b2a"
dependencies = [
"ahash 0.8.2",
"arrow",
@@ -1306,8 +1307,8 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
-version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=799dd747152f6574638a844986b8ea8470d3f4d6#799dd747152f6574638a844986b8ea8470d3f4d6"
+version = "15.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=fbadebb894672f61327a30f77cda2ee88a343b2a#fbadebb894672f61327a30f77cda2ee88a343b2a"
dependencies = [
"arrow",
"async-trait",
@@ -1321,8 +1322,8 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
-version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=799dd747152f6574638a844986b8ea8470d3f4d6#799dd747152f6574638a844986b8ea8470d3f4d6"
+version = "15.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=fbadebb894672f61327a30f77cda2ee88a343b2a#fbadebb894672f61327a30f77cda2ee88a343b2a"
dependencies = [
"ahash 0.8.2",
"arrow",
@@ -1350,8 +1351,8 @@ dependencies = [
[[package]]
name = "datafusion-proto"
-version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=799dd747152f6574638a844986b8ea8470d3f4d6#799dd747152f6574638a844986b8ea8470d3f4d6"
+version = "15.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=fbadebb894672f61327a30f77cda2ee88a343b2a#fbadebb894672f61327a30f77cda2ee88a343b2a"
dependencies = [
"arrow",
"chrono",
@@ -1367,8 +1368,8 @@ dependencies = [
[[package]]
name = "datafusion-row"
-version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=799dd747152f6574638a844986b8ea8470d3f4d6#799dd747152f6574638a844986b8ea8470d3f4d6"
+version = "15.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=fbadebb894672f61327a30f77cda2ee88a343b2a#fbadebb894672f61327a30f77cda2ee88a343b2a"
dependencies = [
"arrow",
"datafusion-common",
@@ -1378,10 +1379,10 @@ dependencies = [
[[package]]
name = "datafusion-sql"
-version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=799dd747152f6574638a844986b8ea8470d3f4d6#799dd747152f6574638a844986b8ea8470d3f4d6"
+version = "15.0.0"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=fbadebb894672f61327a30f77cda2ee88a343b2a#fbadebb894672f61327a30f77cda2ee88a343b2a"
dependencies = [
- "arrow",
+ "arrow-schema",
"datafusion-common",
"datafusion-expr",
"sqlparser 0.27.0",
@@ -1417,6 +1418,12 @@ version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"
+[[package]]
+name = "difference"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198"
+
[[package]]
name = "difflib"
version = "0.4.0"
@@ -3017,6 +3024,17 @@ version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb"
+[[package]]
+name = "libtest-mimic"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7b603516767d1ab23d0de09d023e62966c3322f7148297c35cf3d97aa8b37fa"
+dependencies = [
+ "clap 4.0.29",
+ "termcolor",
+ "threadpool",
+]
+
[[package]]
name = "link-cplusplus"
version = "1.0.7"
@@ -3707,6 +3725,7 @@ name = "parquet_to_line_protocol"
version = "0.1.0"
dependencies = [
"datafusion",
+ "datafusion_util",
"futures",
"influxdb_line_protocol",
"mutable_batch",
@@ -5075,6 +5094,25 @@ dependencies = [
"unicode_categories",
]
+[[package]]
+name = "sqllogictest"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba41e01d229d7725401de371e323851f82d839d68732a06162405362b60852fe"
+dependencies = [
+ "async-trait",
+ "difference",
+ "futures",
+ "glob",
+ "humantime",
+ "itertools",
+ "libtest-mimic",
+ "regex",
+ "tempfile",
+ "thiserror",
+ "tracing",
+]
+
[[package]]
name = "sqlparser"
version = "0.27.0"
@@ -6327,6 +6365,7 @@ dependencies = [
"bytes",
"cc",
"chrono",
+ "clap 4.0.29",
"crossbeam-utils",
"crypto-common",
"datafusion",
@@ -6346,7 +6385,6 @@ dependencies = [
"hashbrown 0.13.1",
"heck",
"indexmap",
- "io-lifetimes",
"libc",
"lock_api",
"log",
diff --git a/Cargo.toml b/Cargo.toml
index fa0a68d6a4..9f4ed11dd9 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -114,8 +114,8 @@ license = "MIT OR Apache-2.0"
[workspace.dependencies]
arrow = { version = "28.0.0" }
arrow-flight = { version = "28.0.0" }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="799dd747152f6574638a844986b8ea8470d3f4d6", default-features = false }
-datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="799dd747152f6574638a844986b8ea8470d3f4d6" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="fbadebb894672f61327a30f77cda2ee88a343b2a", default-features = false }
+datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="fbadebb894672f61327a30f77cda2ee88a343b2a" }
hashbrown = { version = "0.13.1" }
parquet = { version = "28.0.0" }
diff --git a/datafusion_util/src/config.rs b/datafusion_util/src/config.rs
index d16a9a001e..b3ac8bae01 100644
--- a/datafusion_util/src/config.rs
+++ b/datafusion_util/src/config.rs
@@ -26,7 +26,7 @@ pub fn iox_session_config() -> SessionConfig {
// Enable parquet predicate pushdown optimization
.set_bool(OPT_PARQUET_PUSHDOWN_FILTERS, true)
.set_bool(OPT_PARQUET_REORDER_FILTERS, true)
- .create_default_catalog_and_schema(true)
+ .with_create_default_catalog_and_schema(true)
.with_information_schema(true)
.with_default_catalog_and_schema(DEFAULT_CATALOG, DEFAULT_SCHEMA)
}
diff --git a/iox_query/src/frontend/influxrpc.rs b/iox_query/src/frontend/influxrpc.rs
index 21d793fbee..6bbd697608 100644
--- a/iox_query/src/frontend/influxrpc.rs
+++ b/iox_query/src/frontend/influxrpc.rs
@@ -1445,7 +1445,7 @@ fn table_chunk_stream<'a>(
.chunks(
table_name,
predicate,
- &projection,
+ projection.as_ref(),
ctx.child_ctx("table chunks"),
)
.await
diff --git a/iox_query/src/lib.rs b/iox_query/src/lib.rs
index 8570ab5b8b..29e080def4 100644
--- a/iox_query/src/lib.rs
+++ b/iox_query/src/lib.rs
@@ -154,7 +154,7 @@ pub trait QueryNamespace: QueryNamespaceMeta + Debug + Send + Sync {
&self,
table_name: &str,
predicate: &Predicate,
- projection: &Option<Vec<usize>>,
+ projection: Option<&Vec<usize>>,
ctx: IOxSessionContext,
) -> Result<Vec<Arc<dyn QueryChunk>>, DataFusionError>;
diff --git a/iox_query/src/provider.rs b/iox_query/src/provider.rs
index 589d06893c..5801099c59 100644
--- a/iox_query/src/provider.rs
+++ b/iox_query/src/provider.rs
@@ -221,7 +221,7 @@ impl TableProvider for ChunkTableProvider {
async fn scan(
&self,
_ctx: &SessionState,
- projection: &Option<Vec<usize>>,
+ projection: Option<&Vec<usize>>,
filters: &[Expr],
_limit: Option<usize>,
) -> std::result::Result<Arc<dyn ExecutionPlan>, DataFusionError> {
diff --git a/iox_query/src/provider/physical.rs b/iox_query/src/provider/physical.rs
index 40779e3e22..bd2fbcc575 100644
--- a/iox_query/src/provider/physical.rs
+++ b/iox_query/src/provider/physical.rs
@@ -166,7 +166,7 @@ pub fn chunks_to_physical_nodes(
}
let mut parquet_chunks: Vec<_> = parquet_chunks.into_iter().collect();
parquet_chunks.sort_by_key(|(url_str, _)| url_str.clone());
- let target_partitions = context.session_config().target_partitions;
+ let target_partitions = context.session_config().target_partitions();
for (_url_str, chunk_list) in parquet_chunks {
let ParquetChunkList {
object_store_url,
diff --git a/iox_query/src/test.rs b/iox_query/src/test.rs
index b189f0565a..670224f334 100644
--- a/iox_query/src/test.rs
+++ b/iox_query/src/test.rs
@@ -104,7 +104,7 @@ impl QueryNamespace for TestDatabase {
&self,
table_name: &str,
predicate: &Predicate,
- _projection: &Option<Vec<usize>>,
+ _projection: Option<&Vec<usize>>,
_ctx: IOxSessionContext,
) -> Result<Vec<Arc<dyn QueryChunk>>, DataFusionError> {
// save last predicate
diff --git a/parquet_to_line_protocol/Cargo.toml b/parquet_to_line_protocol/Cargo.toml
index 966184c8bb..0d4af4df25 100644
--- a/parquet_to_line_protocol/Cargo.toml
+++ b/parquet_to_line_protocol/Cargo.toml
@@ -7,6 +7,7 @@ license.workspace = true
[dependencies]
datafusion = { workspace = true }
+datafusion_util = { path = "../datafusion_util" }
influxdb_line_protocol = { path = "../influxdb_line_protocol" }
futures = {version = "0.3"}
num_cpus = "1.13.1"
diff --git a/parquet_to_line_protocol/src/lib.rs b/parquet_to_line_protocol/src/lib.rs
index 46710d5db1..05704f571a 100644
--- a/parquet_to_line_protocol/src/lib.rs
+++ b/parquet_to_line_protocol/src/lib.rs
@@ -2,7 +2,6 @@
use datafusion::{
arrow::datatypes::SchemaRef as ArrowSchemaRef,
- config::ConfigOptions,
datasource::{
file_format::{parquet::ParquetFormat, FileFormat},
listing::PartitionedFile,
@@ -16,6 +15,7 @@ use datafusion::{
},
prelude::{SessionConfig, SessionContext},
};
+use datafusion_util::config::iox_session_config;
use futures::StreamExt;
use object_store::{
local::LocalFileSystem, path::Path as ObjectStorePath, ObjectMeta, ObjectStore,
@@ -162,9 +162,8 @@ pub struct ParquetFileReader {
/// Parquet file metadata
schema: ArrowSchemaRef,
- /// number of rows to read in each batch (can pick small to
- /// increase parallelism). Defaults to 1000
- batch_size: usize,
+    /// DataFusion configuration, such as the target batch size, etc
+ session_config: SessionConfig,
}
impl ParquetFileReader {
@@ -174,8 +173,11 @@ impl ParquetFileReader {
object_store_url: ObjectStoreUrl,
object_meta: ObjectMeta,
) -> Result<Self, Error> {
+ let session_config = iox_session_config();
+
// Keep metadata so we can find the measurement name
- let format = ParquetFormat::default().with_skip_metadata(false);
+ let format =
+ ParquetFormat::new(session_config.config_options()).with_skip_metadata(Some(false));
// Use datafusion parquet reader to read the metadata from the
// file.
@@ -189,7 +191,7 @@ impl ParquetFileReader {
object_store_url,
object_meta,
schema,
- batch_size: 1000,
+ session_config,
})
}
@@ -214,15 +216,14 @@ impl ParquetFileReader {
limit: None,
table_partition_cols: vec![],
output_ordering: None,
- config_options: ConfigOptions::new().into_shareable(),
+ config_options: self.session_config.config_options(),
};
// set up enough datafusion context to do the real read session
let predicate = None;
let metadata_size_hint = None;
let exec = ParquetExec::new(base_config, predicate, metadata_size_hint);
- let session_config = SessionConfig::new().with_batch_size(self.batch_size);
- let session_ctx = SessionContext::with_config(session_config);
+ let session_ctx = SessionContext::with_config(self.session_config.clone());
let object_store = Arc::clone(&self.object_store);
let task_ctx = Arc::new(TaskContext::from(&session_ctx));
diff --git a/querier/src/namespace/query_access.rs b/querier/src/namespace/query_access.rs
index 7bbefefb19..0cb71ce49a 100644
--- a/querier/src/namespace/query_access.rs
+++ b/querier/src/namespace/query_access.rs
@@ -42,7 +42,7 @@ impl QueryNamespace for QuerierNamespace {
&self,
table_name: &str,
predicate: &Predicate,
- projection: &Option<Vec<usize>>,
+ projection: Option<&Vec<usize>>,
ctx: IOxSessionContext,
) -> Result<Vec<Arc<dyn QueryChunk>>, DataFusionError> {
debug!(%table_name, %predicate, "Finding chunks for table");
@@ -487,15 +487,15 @@ mod tests {
&querier_namespace,
"EXPLAIN SELECT * FROM cpu",
&[
- "+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+",
- "| plan_type | plan |",
- "+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+",
- "| logical_plan | Projection: cpu.foo, cpu.host, cpu.load, cpu.time |",
- "| | TableScan: cpu projection=[foo, host, load, time] |",
- "| physical_plan | ProjectionExec: expr=[foo@0 as foo, host@1 as host, load@2 as load, time@3 as time] |",
- "| | ParquetExec: limit=None, partitions=[1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/2/2/<uuid>.parquet, 1/1/1/3/<uuid>.parquet], projection=[foo, host, load, time] |",
- "| | |",
- "+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+",
+ "+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+",
+ "| plan_type | plan |",
+ "+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+",
+ "| logical_plan | Projection: cpu.foo, cpu.host, cpu.load, cpu.time |",
+ "| | TableScan: cpu projection=[foo, host, load, time] |",
+ "| physical_plan | ProjectionExec: expr=[foo@0 as foo, host@1 as host, load@2 as load, time@3 as time] |",
+ "| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/2/2/<uuid>.parquet, 1/1/1/3/<uuid>.parquet]]}, projection=[foo, host, load, time] |",
+ "| | |",
+ "+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+",
],
)
.await;
@@ -507,24 +507,24 @@ mod tests {
&querier_namespace,
"EXPLAIN SELECT * FROM mem ORDER BY host,time",
&[
- "+---------------+---------------------------------------------------------------------------------------------------------------------------------------+",
- "| plan_type | plan |",
- "+---------------+---------------------------------------------------------------------------------------------------------------------------------------+",
- "| logical_plan | Sort: mem.host ASC NULLS LAST, mem.time ASC NULLS LAST |",
- "| | Projection: mem.host, mem.perc, mem.time |",
- "| | TableScan: mem projection=[host, perc, time] |",
- "| physical_plan | SortExec: [host@0 ASC NULLS LAST,time@2 ASC NULLS LAST] |",
- "| | CoalescePartitionsExec |",
- "| | ProjectionExec: expr=[host@0 as host, perc@1 as perc, time@2 as time] |",
- "| | UnionExec |",
- "| | CoalesceBatchesExec: target_batch_size=4096 |",
- "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |",
- "| | ParquetExec: limit=None, partitions=[1/2/1/4/<uuid>.parquet], projection=[host, perc, time] |",
- "| | CoalesceBatchesExec: target_batch_size=4096 |",
- "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |",
- "| | ParquetExec: limit=None, partitions=[1/2/1/4/<uuid>.parquet], projection=[host, perc, time] |",
- "| | |",
- "+---------------+---------------------------------------------------------------------------------------------------------------------------------------+",
+ "+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------+",
+ "| plan_type | plan |",
+ "+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------+",
+ "| logical_plan | Sort: mem.host ASC NULLS LAST, mem.time ASC NULLS LAST |",
+ "| | Projection: mem.host, mem.perc, mem.time |",
+ "| | TableScan: mem projection=[host, perc, time] |",
+ "| physical_plan | SortExec: [host@0 ASC NULLS LAST,time@2 ASC NULLS LAST] |",
+ "| | CoalescePartitionsExec |",
+ "| | ProjectionExec: expr=[host@0 as host, perc@1 as perc, time@2 as time] |",
+ "| | UnionExec |",
+ "| | CoalesceBatchesExec: target_batch_size=4096 |",
+ "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |",
+ "| | ParquetExec: limit=None, partitions={1 group: [[1/2/1/4/<uuid>.parquet]]}, projection=[host, perc, time] |",
+ "| | CoalesceBatchesExec: target_batch_size=4096 |",
+ "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |",
+ "| | ParquetExec: limit=None, partitions={1 group: [[1/2/1/4/<uuid>.parquet]]}, projection=[host, perc, time] |",
+ "| | |",
+ "+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------+",
],
)
.await;
@@ -567,21 +567,21 @@ mod tests {
&querier_namespace,
"EXPLAIN SELECT * FROM cpu",
&[
- "+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+",
- "| plan_type | plan |",
- "+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+",
- "| logical_plan | Projection: cpu.foo, cpu.host, cpu.load, cpu.time |",
- "| | TableScan: cpu projection=[foo, host, load, time] |",
- "| physical_plan | ProjectionExec: expr=[foo@0 as foo, host@1 as host, load@2 as load, time@3 as time] |",
- "| | UnionExec |",
- "| | DeduplicateExec: [host@1 ASC,time@3 ASC] |",
- "| | SortPreservingMergeExec: [host@1 ASC,time@3 ASC] |",
- "| | UnionExec |",
- "| | ParquetExec: limit=None, partitions=[1/1/2/2/<uuid>.parquet], output_ordering=[host@1 ASC, time@3 ASC], projection=[foo, host, load, time] |",
- "| | ParquetExec: limit=None, partitions=[1/1/2/2/<uuid>.parquet], output_ordering=[host@1 ASC, time@3 ASC], projection=[foo, host, load, time] |",
- "| | ParquetExec: limit=None, partitions=[1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/3/<uuid>.parquet], projection=[foo, host, load, time] |",
- "| | |",
- "+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+",
+ "+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+",
+ "| plan_type | plan |",
+ "+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+",
+ "| logical_plan | Projection: cpu.foo, cpu.host, cpu.load, cpu.time |",
+ "| | TableScan: cpu projection=[foo, host, load, time] |",
+ "| physical_plan | ProjectionExec: expr=[foo@0 as foo, host@1 as host, load@2 as load, time@3 as time] |",
+ "| | UnionExec |",
+ "| | DeduplicateExec: [host@1 ASC,time@3 ASC] |",
+ "| | SortPreservingMergeExec: [host@1 ASC,time@3 ASC] |",
+ "| | UnionExec |",
+ "| | ParquetExec: limit=None, partitions={1 group: [[1/1/2/2/<uuid>.parquet]]}, output_ordering=[host@1 ASC, time@3 ASC], projection=[foo, host, load, time] |",
+ "| | ParquetExec: limit=None, partitions={1 group: [[1/1/2/2/<uuid>.parquet]]}, output_ordering=[host@1 ASC, time@3 ASC], projection=[foo, host, load, time] |",
+ "| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/3/<uuid>.parquet]]}, projection=[foo, host, load, time] |",
+ "| | |",
+ "+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+",
],
)
.await;
diff --git a/querier/src/system_tables/mod.rs b/querier/src/system_tables/mod.rs
index 55faed8429..0429b55a1f 100644
--- a/querier/src/system_tables/mod.rs
+++ b/querier/src/system_tables/mod.rs
@@ -101,7 +101,7 @@ where
async fn scan(
&self,
_ctx: &SessionState,
- projection: &Option<Vec<usize>>,
+ projection: Option<&Vec<usize>>,
// It would be cool to push projection and limit down
_filters: &[Expr],
_limit: Option<usize>,
@@ -114,7 +114,7 @@ where
Ok(Arc::new(SystemTableExecutionPlan {
table: Arc::clone(&self.table),
- projection: projection.clone(),
+ projection: projection.cloned(),
projected_schema,
}))
}
diff --git a/querier/src/table/mod.rs b/querier/src/table/mod.rs
index 5f86bd3433..be25956c11 100644
--- a/querier/src/table/mod.rs
+++ b/querier/src/table/mod.rs
@@ -193,7 +193,7 @@ impl QuerierTable {
&self,
predicate: &Predicate,
span: Option<Span>,
- projection: &Option<Vec<usize>>,
+ projection: Option<&Vec<usize>>,
) -> Result<Vec<Arc<dyn QueryChunk>>> {
let mut span_recorder = SpanRecorder::new(span);
match self
@@ -215,7 +215,7 @@ impl QuerierTable {
&self,
predicate: &Predicate,
span_recorder: &SpanRecorder,
- projection: &Option<Vec<usize>>,
+ projection: Option<&Vec<usize>>,
) -> Result<Vec<Arc<dyn QueryChunk>>> {
debug!(
?predicate,
@@ -429,7 +429,7 @@ impl QuerierTable {
&self,
predicate: &Predicate,
span: Option<Span>,
- projection: &Option<Vec<usize>>,
+ projection: Option<&Vec<usize>>,
) -> Result<Vec<IngesterPartition>> {
let mut span_recorder = SpanRecorder::new(span);
@@ -464,7 +464,7 @@ impl QuerierTable {
ingester_connection: Arc<dyn IngesterConnection>,
predicate: &Predicate,
span_recorder: &SpanRecorder,
- projection: &Option<Vec<usize>>,
+ projection: Option<&Vec<usize>>,
) -> Result<Vec<IngesterPartition>> {
// If the projection is provided, use it. Otherwise, use all columns of the table
// The provided projection should include all columns needed by the query
@@ -826,7 +826,7 @@ mod tests {
// Expect one chunk from the ingester
let pred = Predicate::new().with_range(0, 100);
let chunks = querier_table
- .chunks_with_predicate_and_projection(&pred, &Some(vec![1])) // only select `foo` column
+ .chunks_with_predicate_and_projection(&pred, Some(&vec![1])) // only select `foo` column
.await
.unwrap();
assert_eq!(chunks.len(), 1);
@@ -1369,14 +1369,14 @@ mod tests {
&self,
pred: &Predicate,
) -> Result<Vec<Arc<dyn QueryChunk>>> {
- self.chunks_with_predicate_and_projection(pred, &None).await
+ self.chunks_with_predicate_and_projection(pred, None).await
}
/// Invokes querier_table.chunks modeling the ingester sending the partitions in this table
async fn chunks_with_predicate_and_projection(
&self,
pred: &Predicate,
- projection: &Option<Vec<usize>>,
+ projection: Option<&Vec<usize>>,
) -> Result<Vec<Arc<dyn QueryChunk>>> {
self.querier_table
.ingester_connection
diff --git a/querier/src/table/query_access/mod.rs b/querier/src/table/query_access/mod.rs
index a0702dac67..e82af6dac3 100644
--- a/querier/src/table/query_access/mod.rs
+++ b/querier/src/table/query_access/mod.rs
@@ -44,7 +44,7 @@ impl TableProvider for QuerierTable {
async fn scan(
&self,
ctx: &SessionState,
- projection: &Option<Vec<usize>>,
+ projection: Option<&Vec<usize>>,
filters: &[Expr],
limit: Option<usize>,
) -> Result<Arc<dyn ExecutionPlan>, DataFusionError> {
diff --git a/query_tests/cases/in/dedup_and_predicates_parquet.expected b/query_tests/cases/in/dedup_and_predicates_parquet.expected
index 80b4c85b52..f4067049ad 100644
--- a/query_tests/cases/in/dedup_and_predicates_parquet.expected
+++ b/query_tests/cases/in/dedup_and_predicates_parquet.expected
@@ -8,23 +8,23 @@
+-----+-----+-----+----------------------+
-- SQL: EXPLAIN SELECT * FROM "table" ORDER BY tag;
-- Results After Normalizing UUIDs
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Sort: table.tag ASC NULLS LAST |
-| | Projection: table.bar, table.foo, table.tag, table.time |
-| | TableScan: table projection=[bar, foo, tag, time] |
-| physical_plan | SortExec: [tag@2 ASC NULLS LAST] |
-| | CoalescePartitionsExec |
-| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
-| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Sort: table.tag ASC NULLS LAST |
+| | Projection: table.bar, table.foo, table.tag, table.time |
+| | TableScan: table projection=[bar, foo, tag, time] |
+| physical_plan | SortExec: [tag@2 ASC NULLS LAST] |
+| | CoalescePartitionsExec |
+| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * FROM "table" WHERE tag='A';
+-----+-----+-----+----------------------+
| bar | foo | tag | time |
@@ -33,23 +33,23 @@
+-----+-----+-----+----------------------+
-- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A';
-- Results After Normalizing UUIDs
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
-| | Filter: table.tag = Dictionary(Int32, Utf8("A")) |
-| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] |
-| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: tag@2 = A |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
-| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.tag = Dictionary(Int32, Utf8("A")) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] |
+| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: tag@2 = A |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * FROM "table" WHERE foo=1 AND bar=2;
+-----+-----+-----+----------------------+
| bar | foo | tag | time |
@@ -58,23 +58,23 @@
+-----+-----+-----+----------------------+
-- SQL: EXPLAIN SELECT * FROM "table" WHERE foo=1 AND bar=2;
-- Results After Normalizing UUIDs
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
-| | Filter: table.foo = Float64(1) AND table.bar = Float64(2) |
-| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.foo = Float64(1), table.bar = Float64(2)] |
-| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: foo@1 = 1 AND bar@0 = 2 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
-| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.foo = Float64(1) AND table.bar = Float64(2) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.foo = Float64(1), table.bar = Float64(2)] |
+| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: foo@1 = 1 AND bar@0 = 2 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag;
+-----+-----+-----+----------------------+
| bar | foo | tag | time |
@@ -84,26 +84,26 @@
+-----+-----+-----+----------------------+
-- SQL: EXPLAIN SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag;
-- Results After Normalizing UUIDs
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Sort: table.tag ASC NULLS LAST |
-| | Projection: table.bar, table.foo, table.tag, table.time |
-| | Filter: table.time = TimestampNanosecond(0, None) |
-| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.time = TimestampNanosecond(0, None)] |
-| physical_plan | SortExec: [tag@2 ASC NULLS LAST] |
-| | CoalescePartitionsExec |
-| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: time@3 = 0 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
-| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=time = TimestampNanosecond(0, None), pruning_predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=time = TimestampNanosecond(0, None), pruning_predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Sort: table.tag ASC NULLS LAST |
+| | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.time = TimestampNanosecond(0, None) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.time = TimestampNanosecond(0, None)] |
+| physical_plan | SortExec: [tag@2 ASC NULLS LAST] |
+| | CoalescePartitionsExec |
+| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: time@3 = 0 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=time = TimestampNanosecond(0, None), pruning_predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=time = TimestampNanosecond(0, None), pruning_predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00');
+-----+-----+-----+----------------------+
| bar | foo | tag | time |
@@ -112,20 +112,20 @@
+-----+-----+-----+----------------------+
-- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00');
-- Results After Normalizing UUIDs
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
-| | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) |
-| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] |
-| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
-| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] |
+| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | |
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/query_tests/cases/in/dedup_and_predicates_parquet_ingester.expected b/query_tests/cases/in/dedup_and_predicates_parquet_ingester.expected
index 3cb766c556..c7a05cd987 100644
--- a/query_tests/cases/in/dedup_and_predicates_parquet_ingester.expected
+++ b/query_tests/cases/in/dedup_and_predicates_parquet_ingester.expected
@@ -8,24 +8,24 @@
+-----+-----+-----+----------------------+
-- SQL: EXPLAIN SELECT * FROM "table" ORDER BY tag;
-- Results After Normalizing UUIDs
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Sort: table.tag ASC NULLS LAST |
-| | Projection: table.bar, table.foo, table.tag, table.time |
-| | TableScan: table projection=[bar, foo, tag, time] |
-| physical_plan | SortExec: [tag@2 ASC NULLS LAST] |
-| | CoalescePartitionsExec |
-| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
-| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | SortExec: [tag@2 ASC,time@3 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Sort: table.tag ASC NULLS LAST |
+| | Projection: table.bar, table.foo, table.tag, table.time |
+| | TableScan: table projection=[bar, foo, tag, time] |
+| physical_plan | SortExec: [tag@2 ASC NULLS LAST] |
+| | CoalescePartitionsExec |
+| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | SortExec: [tag@2 ASC,time@3 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * FROM "table" WHERE tag='A';
+-----+-----+-----+----------------------+
| bar | foo | tag | time |
@@ -34,24 +34,24 @@
+-----+-----+-----+----------------------+
-- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A';
-- Results After Normalizing UUIDs
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
-| | Filter: table.tag = Dictionary(Int32, Utf8("A")) |
-| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] |
-| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: tag@2 = A |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
-| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | SortExec: [tag@2 ASC,time@3 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.tag = Dictionary(Int32, Utf8("A")) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] |
+| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: tag@2 = A |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | SortExec: [tag@2 ASC,time@3 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * FROM "table" WHERE foo=1 AND bar=2;
+-----+-----+-----+----------------------+
| bar | foo | tag | time |
@@ -60,24 +60,24 @@
+-----+-----+-----+----------------------+
-- SQL: EXPLAIN SELECT * FROM "table" WHERE foo=1 AND bar=2;
-- Results After Normalizing UUIDs
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
-| | Filter: table.foo = Float64(1) AND table.bar = Float64(2) |
-| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.foo = Float64(1), table.bar = Float64(2)] |
-| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: foo@1 = 1 AND bar@0 = 2 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
-| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | SortExec: [tag@2 ASC,time@3 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.foo = Float64(1) AND table.bar = Float64(2) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.foo = Float64(1), table.bar = Float64(2)] |
+| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: foo@1 = 1 AND bar@0 = 2 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | SortExec: [tag@2 ASC,time@3 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag;
+-----+-----+-----+----------------------+
| bar | foo | tag | time |
@@ -87,27 +87,27 @@
+-----+-----+-----+----------------------+
-- SQL: EXPLAIN SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag;
-- Results After Normalizing UUIDs
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Sort: table.tag ASC NULLS LAST |
-| | Projection: table.bar, table.foo, table.tag, table.time |
-| | Filter: table.time = TimestampNanosecond(0, None) |
-| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.time = TimestampNanosecond(0, None)] |
-| physical_plan | SortExec: [tag@2 ASC NULLS LAST] |
-| | CoalescePartitionsExec |
-| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: time@3 = 0 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
-| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=time = TimestampNanosecond(0, None), pruning_predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | SortExec: [tag@2 ASC,time@3 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Sort: table.tag ASC NULLS LAST |
+| | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.time = TimestampNanosecond(0, None) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.time = TimestampNanosecond(0, None)] |
+| physical_plan | SortExec: [tag@2 ASC NULLS LAST] |
+| | CoalescePartitionsExec |
+| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: time@3 = 0 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=time = TimestampNanosecond(0, None), pruning_predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | SortExec: [tag@2 ASC,time@3 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00');
+-----+-----+-----+----------------------+
| bar | foo | tag | time |
@@ -116,21 +116,21 @@
+-----+-----+-----+----------------------+
-- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00');
-- Results After Normalizing UUIDs
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
-| | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) |
-| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] |
-| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
-| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | SortExec: [tag@2 ASC,time@3 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] |
+| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | SortExec: [tag@2 ASC,time@3 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | |
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/query_tests/cases/in/duplicates_ingester.expected b/query_tests/cases/in/duplicates_ingester.expected
index 17c5b66ac6..3c5fa34bbe 100644
--- a/query_tests/cases/in/duplicates_ingester.expected
+++ b/query_tests/cases/in/duplicates_ingester.expected
@@ -1,91 +1,91 @@
-- Test Setup: OneMeasurementFourChunksWithDuplicatesWithIngester
-- SQL: explain select time, state, city, min_temp, max_temp, area from h2o order by time, state, city;
-- Results After Normalizing UUIDs
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Sort: h2o.time ASC NULLS LAST, h2o.state ASC NULLS LAST, h2o.city ASC NULLS LAST |
-| | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area |
-| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] |
-| physical_plan | SortExec: [time@0 ASC NULLS LAST,state@1 ASC NULLS LAST,city@2 ASC NULLS LAST] |
-| | CoalescePartitionsExec |
-| | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | UnionExec |
-| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] |
-| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] |
-| | DeduplicateExec: [city@1 ASC,state@4 ASC,time@5 ASC] |
-| | SortExec: [city@1 ASC,state@4 ASC,time@5 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], projection=[area, city, max_temp, min_temp, state, time] |
-| | |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Sort: h2o.time ASC NULLS LAST, h2o.state ASC NULLS LAST, h2o.city ASC NULLS LAST |
+| | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area |
+| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] |
+| physical_plan | SortExec: [time@0 ASC NULLS LAST,state@1 ASC NULLS LAST,city@2 ASC NULLS LAST] |
+| | CoalescePartitionsExec |
+| | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | UnionExec |
+| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] |
+| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] |
+| | DeduplicateExec: [city@1 ASC,state@4 ASC,time@5 ASC] |
+| | SortExec: [city@1 ASC,state@4 ASC,time@5 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, projection=[area, city, max_temp, min_temp, state, time] |
+| | |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: EXPLAIN select time, state, city, min_temp, max_temp, area from h2o;
-- Results After Normalizing UUIDs
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area |
-| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] |
-| physical_plan | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] |
-| | UnionExec |
-| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] |
-| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] |
-| | DeduplicateExec: [city@1 ASC,state@4 ASC,time@5 ASC] |
-| | SortExec: [city@1 ASC,state@4 ASC,time@5 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], projection=[area, city, max_temp, min_temp, state, time] |
-| | |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area |
+| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] |
+| physical_plan | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] |
+| | UnionExec |
+| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] |
+| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] |
+| | DeduplicateExec: [city@1 ASC,state@4 ASC,time@5 ASC] |
+| | SortExec: [city@1 ASC,state@4 ASC,time@5 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, projection=[area, city, max_temp, min_temp, state, time] |
+| | |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: EXPLAIN select state as name from h2o UNION ALL select city as name from h2o;
-- Results After Normalizing UUIDs
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Union |
-| | Projection: h2o.state AS name |
-| | TableScan: h2o projection=[state] |
-| | Projection: h2o.city AS name |
-| | TableScan: h2o projection=[city] |
-| physical_plan | UnionExec |
-| | ProjectionExec: expr=[state@0 as name] |
-| | UnionExec |
-| | ProjectionExec: expr=[state@1 as state] |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [state@1 ASC,city@0 ASC,time@2 ASC] |
-| | SortPreservingMergeExec: [state@1 ASC,city@0 ASC,time@2 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] |
-| | ProjectionExec: expr=[state@1 as state] |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [city@0 ASC,state@1 ASC,time@2 ASC] |
-| | SortExec: [city@0 ASC,state@1 ASC,time@2 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], projection=[state] |
-| | ProjectionExec: expr=[city@0 as name] |
-| | UnionExec |
-| | ProjectionExec: expr=[city@0 as city] |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [state@1 ASC,city@0 ASC,time@2 ASC] |
-| | SortPreservingMergeExec: [state@1 ASC,city@0 ASC,time@2 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] |
-| | ProjectionExec: expr=[city@0 as city] |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [city@0 ASC,state@1 ASC,time@2 ASC] |
-| | SortExec: [city@0 ASC,state@1 ASC,time@2 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], projection=[city] |
-| | |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Union |
+| | Projection: h2o.state AS name |
+| | TableScan: h2o projection=[state] |
+| | Projection: h2o.city AS name |
+| | TableScan: h2o projection=[city] |
+| physical_plan | UnionExec |
+| | ProjectionExec: expr=[state@0 as name] |
+| | UnionExec |
+| | ProjectionExec: expr=[state@1 as state] |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [state@1 ASC,city@0 ASC,time@2 ASC] |
+| | SortPreservingMergeExec: [state@1 ASC,city@0 ASC,time@2 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] |
+| | ProjectionExec: expr=[state@1 as state] |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [city@0 ASC,state@1 ASC,time@2 ASC] |
+| | SortExec: [city@0 ASC,state@1 ASC,time@2 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, projection=[state] |
+| | ProjectionExec: expr=[city@0 as name] |
+| | UnionExec |
+| | ProjectionExec: expr=[city@0 as city] |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [state@1 ASC,city@0 ASC,time@2 ASC] |
+| | SortPreservingMergeExec: [state@1 ASC,city@0 ASC,time@2 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] |
+| | ProjectionExec: expr=[city@0 as city] |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [city@0 ASC,state@1 ASC,time@2 ASC] |
+| | SortExec: [city@0 ASC,state@1 ASC,time@2 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, projection=[city] |
+| | |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: select count(*) from h2o;
+-----------------+
| COUNT(UInt8(1)) |
diff --git a/query_tests/cases/in/duplicates_parquet.expected b/query_tests/cases/in/duplicates_parquet.expected
index 81f7a0b091..57f3aee984 100644
--- a/query_tests/cases/in/duplicates_parquet.expected
+++ b/query_tests/cases/in/duplicates_parquet.expected
@@ -1,75 +1,75 @@
-- Test Setup: OneMeasurementFourChunksWithDuplicatesParquetOnly
-- SQL: explain select time, state, city, min_temp, max_temp, area from h2o order by time, state, city;
-- Results After Normalizing UUIDs
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Sort: h2o.time ASC NULLS LAST, h2o.state ASC NULLS LAST, h2o.city ASC NULLS LAST |
-| | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area |
-| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] |
-| physical_plan | SortExec: [time@0 ASC NULLS LAST,state@1 ASC NULLS LAST,city@2 ASC NULLS LAST] |
-| | CoalescePartitionsExec |
-| | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | UnionExec |
-| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] |
-| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], projection=[area, city, max_temp, min_temp, state, time] |
-| | |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Sort: h2o.time ASC NULLS LAST, h2o.state ASC NULLS LAST, h2o.city ASC NULLS LAST |
+| | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area |
+| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] |
+| physical_plan | SortExec: [time@0 ASC NULLS LAST,state@1 ASC NULLS LAST,city@2 ASC NULLS LAST] |
+| | CoalescePartitionsExec |
+| | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | UnionExec |
+| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] |
+| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] |
+| | ParquetExec: limit=None, partitions={2 groups: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], [1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, projection=[area, city, max_temp, min_temp, state, time] |
+| | |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: EXPLAIN select time, state, city, min_temp, max_temp, area from h2o;
-- Results After Normalizing UUIDs
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area |
-| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] |
-| physical_plan | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] |
-| | UnionExec |
-| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] |
-| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], projection=[area, city, max_temp, min_temp, state, time] |
-| | |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area |
+| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] |
+| physical_plan | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] |
+| | UnionExec |
+| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] |
+| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] |
+| | ParquetExec: limit=None, partitions={2 groups: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], [1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, projection=[area, city, max_temp, min_temp, state, time] |
+| | |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: EXPLAIN select state as name from h2o UNION ALL select city as name from h2o;
-- Results After Normalizing UUIDs
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Union |
-| | Projection: h2o.state AS name |
-| | TableScan: h2o projection=[state] |
-| | Projection: h2o.city AS name |
-| | TableScan: h2o projection=[city] |
-| physical_plan | UnionExec |
-| | ProjectionExec: expr=[state@0 as name] |
-| | UnionExec |
-| | ProjectionExec: expr=[state@1 as state] |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [state@1 ASC,city@0 ASC,time@2 ASC] |
-| | SortPreservingMergeExec: [state@1 ASC,city@0 ASC,time@2 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], projection=[state] |
-| | ProjectionExec: expr=[city@0 as name] |
-| | UnionExec |
-| | ProjectionExec: expr=[city@0 as city] |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [state@1 ASC,city@0 ASC,time@2 ASC] |
-| | SortPreservingMergeExec: [state@1 ASC,city@0 ASC,time@2 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], projection=[city] |
-| | |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Union |
+| | Projection: h2o.state AS name |
+| | TableScan: h2o projection=[state] |
+| | Projection: h2o.city AS name |
+| | TableScan: h2o projection=[city] |
+| physical_plan | UnionExec |
+| | ProjectionExec: expr=[state@0 as name] |
+| | UnionExec |
+| | ProjectionExec: expr=[state@1 as state] |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [state@1 ASC,city@0 ASC,time@2 ASC] |
+| | SortPreservingMergeExec: [state@1 ASC,city@0 ASC,time@2 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] |
+| | ParquetExec: limit=None, partitions={2 groups: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], [1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, projection=[state] |
+| | ProjectionExec: expr=[city@0 as name] |
+| | UnionExec |
+| | ProjectionExec: expr=[city@0 as city] |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [state@1 ASC,city@0 ASC,time@2 ASC] |
+| | SortPreservingMergeExec: [state@1 ASC,city@0 ASC,time@2 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] |
+| | ParquetExec: limit=None, partitions={2 groups: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], [1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, projection=[city] |
+| | |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: select count(*) from h2o;
+-----------------+
| COUNT(UInt8(1)) |
@@ -91,8 +91,8 @@
| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC], metrics=[elapsed_compute=1.234ms, mem_used=0, num_dupes=2, output_rows=5, spill_count=0, spilled_bytes=0] |
| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC], metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=7, spill_count=0, spilled_bytes=0] |
| | UnionExec, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=7, spill_count=0, spilled_bytes=0] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=state = Dictionary(Int32, Utf8("MA")), pruning_predicate=state_min@0 <= MA AND MA <= state_max@1, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned=474, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=4, page_index_eval_time=1.234ms, page_index_rows_filtered=0, predicate_evaluation_errors=0, pushdown_eval_time=1.234ms, pushdown_rows_filtered=0, row_groups_pruned=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=state = Dictionary(Int32, Utf8("MA")), pruning_predicate=state_min@0 <= MA AND MA <= state_max@1, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned=632, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=3, page_index_eval_time=1.234ms, page_index_rows_filtered=0, predicate_evaluation_errors=0, pushdown_eval_time=1.234ms, pushdown_rows_filtered=3, row_groups_pruned=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], predicate=state = Dictionary(Int32, Utf8("MA")), pruning_predicate=state_min@0 <= MA AND MA <= state_max@1, projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned=1219, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=5, page_index_eval_time=1.234ms, page_index_rows_filtered=0, predicate_evaluation_errors=0, pushdown_eval_time=1.234ms, pushdown_rows_filtered=5, row_groups_pruned=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=state = Dictionary(Int32, Utf8("MA")), pruning_predicate=state_min@0 <= MA AND MA <= state_max@1, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned=474, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=4, page_index_eval_time=1.234ms, page_index_rows_filtered=0, predicate_evaluation_errors=0, pushdown_eval_time=1.234ms, pushdown_rows_filtered=0, row_groups_pruned=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=state = Dictionary(Int32, Utf8("MA")), pruning_predicate=state_min@0 <= MA AND MA <= state_max@1, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned=632, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=3, page_index_eval_time=1.234ms, page_index_rows_filtered=0, predicate_evaluation_errors=0, pushdown_eval_time=1.234ms, pushdown_rows_filtered=3, row_groups_pruned=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] |
+| | ParquetExec: limit=None, partitions={2 groups: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], [1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, predicate=state = Dictionary(Int32, Utf8("MA")), pruning_predicate=state_min@0 <= MA AND MA <= state_max@1, projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned=1219, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=5, page_index_eval_time=1.234ms, page_index_rows_filtered=0, predicate_evaluation_errors=0, pushdown_eval_time=1.234ms, pushdown_rows_filtered=5, row_groups_pruned=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] |
| | |
----------
diff --git a/query_tests/cases/in/duplicates_parquet_many.expected b/query_tests/cases/in/duplicates_parquet_many.expected
index 679b685697..899284c45a 100644
--- a/query_tests/cases/in/duplicates_parquet_many.expected
+++ b/query_tests/cases/in/duplicates_parquet_many.expected
@@ -7,32 +7,32 @@
+-----------------+----------+
-- SQL: EXPLAIN select count(*), sum(f) from m;
-- Results After Normalizing UUIDs
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: COUNT(UInt8(1)), SUM(m.f) |
-| | Aggregate: groupBy=[[]], aggr=[[COUNT(UInt8(1)), SUM(m.f)]] |
-| | TableScan: m projection=[f] |
-| physical_plan | ProjectionExec: expr=[COUNT(UInt8(1))@0 as COUNT(UInt8(1)), SUM(m.f)@1 as SUM(m.f)] |
-| | AggregateExec: mode=Final, gby=[], aggr=[COUNT(UInt8(1)), SUM(m.f)] |
-| | CoalescePartitionsExec |
-| | AggregateExec: mode=Partial, gby=[], aggr=[COUNT(UInt8(1)), SUM(m.f)] |
-| | UnionExec |
-| | ProjectionExec: expr=[f@0 as f] |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [tag@1 ASC,time@2 ASC] |
-| | SortPreservingMergeExec: [tag@1 ASC,time@2 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000004.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000005.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000006.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000007.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000008.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000009.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-00000000000a.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000b.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000c.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000d.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000e.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000f.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000010.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000011.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000012.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000013.parquet], projection=[f] |
-| | |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: COUNT(UInt8(1)), SUM(m.f) |
+| | Aggregate: groupBy=[[]], aggr=[[COUNT(UInt8(1)), SUM(m.f)]] |
+| | TableScan: m projection=[f] |
+| physical_plan | ProjectionExec: expr=[COUNT(UInt8(1))@0 as COUNT(UInt8(1)), SUM(m.f)@1 as SUM(m.f)] |
+| | AggregateExec: mode=Final, gby=[], aggr=[COUNT(UInt8(1)), SUM(m.f)] |
+| | CoalescePartitionsExec |
+| | AggregateExec: mode=Partial, gby=[], aggr=[COUNT(UInt8(1)), SUM(m.f)] |
+| | UnionExec |
+| | ProjectionExec: expr=[f@0 as f] |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@1 ASC,time@2 ASC] |
+| | SortPreservingMergeExec: [tag@1 ASC,time@2 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000004.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000005.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000006.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000007.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000008.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000009.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] |
+| | ParquetExec: limit=None, partitions={4 groups: [[1/1/1/1/00000000-0000-0000-0000-00000000000a.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000b.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000c.parquet], [1/1/1/1/00000000-0000-0000-0000-00000000000d.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000e.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000f.parquet], [1/1/1/1/00000000-0000-0000-0000-000000000010.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000011.parquet], [1/1/1/1/00000000-0000-0000-0000-000000000012.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000013.parquet]]}, projection=[f] |
+| | |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/query_tests/cases/in/periods.expected b/query_tests/cases/in/periods.expected
new file mode 100644
index 0000000000..47a9dd7349
--- /dev/null
+++ b/query_tests/cases/in/periods.expected
@@ -0,0 +1,21 @@
+-- Test Setup: PeriodsInNames
+-- SQL: SELECT * from "measurement.one";
++-----------+-----------+---------+---------+--------------------------------+
+| field.one | field.two | tag.one | tag.two | time |
++-----------+-----------+---------+---------+--------------------------------+
+| 1 | true | value | other | 2021-01-01T00:00:01.000000001Z |
+| 1 | false | value2 | other2 | 2021-01-01T00:00:01.000000002Z |
++-----------+-----------+---------+---------+--------------------------------+
+-- SQL: SELECT "tag.one" from "measurement.one";
++---------+
+| tag.one |
++---------+
+| value |
+| value2 |
++---------+
+-- SQL: SELECT "tag.one" from "measurement.one" where "field.two" is TRUE;
++---------+
+| tag.one |
++---------+
+| value |
++---------+
diff --git a/query_tests/cases/in/periods.sql b/query_tests/cases/in/periods.sql
new file mode 100644
index 0000000000..886897decb
--- /dev/null
+++ b/query_tests/cases/in/periods.sql
@@ -0,0 +1,13 @@
+-- Basic query tests for measurement names that have periods in their names
+-- IOX_SETUP: PeriodsInNames
+
+-- query data
+SELECT * from "measurement.one";
+
+
+
+-- projection
+SELECT "tag.one" from "measurement.one";
+
+-- predicate
+SELECT "tag.one" from "measurement.one" where "field.two" is TRUE;
diff --git a/query_tests/cases/in/pushdown.expected b/query_tests/cases/in/pushdown.expected
index 2fa10056e9..196fac6170 100644
--- a/query_tests/cases/in/pushdown.expected
+++ b/query_tests/cases/in/pushdown.expected
@@ -14,15 +14,15 @@
+-------+--------+--------------------------------+-----------+
-- SQL: EXPLAIN SELECT * from restaurant;
-- Results After Normalizing UUIDs
-+---------------+--------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+--------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | TableScan: restaurant projection=[count, system, time, town] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[count, system, time, town] |
-| | |
-+---------------+--------------------------------------------------------------------------------------------------------------------------------------+
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | TableScan: restaurant projection=[count, system, time, town] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, projection=[count, system, time, town] |
+| | |
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where count > 200;
-- Results After Sorting
+-------+--------+--------------------------------+-----------+
@@ -37,49 +37,49 @@
+-------+--------+--------------------------------+-----------+
-- SQL: EXPLAIN SELECT * from restaurant where count > 200;
-- Results After Normalizing UUIDs
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.count > UInt64(200) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200)] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: count@0 > 200 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=count > UInt64(200), pruning_predicate=count_max@0 > 200, projection=[count, system, time, town] |
-| | |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.count > UInt64(200) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200)] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: count@0 > 200 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200), pruning_predicate=count_max@0 > 200, projection=[count, system, time, town] |
+| | |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: EXPLAIN SELECT * from restaurant where count > 200.0;
-- Results After Normalizing UUIDs
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: CAST(restaurant.count AS Float64) > Float64(200) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[CAST(restaurant.count AS Float64) > Float64(200)] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: CAST(count@0 AS Float64) > 200 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=CAST(count AS Float64) > Float64(200), projection=[count, system, time, town] |
-| | |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: CAST(restaurant.count AS Float64) > Float64(200) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[CAST(restaurant.count AS Float64) > Float64(200)] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: CAST(count@0 AS Float64) > 200 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=CAST(count AS Float64) > Float64(200), projection=[count, system, time, town] |
+| | |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: EXPLAIN SELECT * from restaurant where system > 4.0;
-- Results After Normalizing UUIDs
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.system > Float64(4) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4)] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: system@1 > 4 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system > Float64(4), pruning_predicate=system_max@0 > 4, projection=[count, system, time, town] |
-| | |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.system > Float64(4) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4)] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: system@1 > 4 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(4), pruning_predicate=system_max@0 > 4, projection=[count, system, time, town] |
+| | |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where count > 200 and town != 'tewsbury';
-- Results After Sorting
+-------+--------+--------------------------------+-----------+
@@ -93,19 +93,19 @@
+-------+--------+--------------------------------+-----------+
-- SQL: EXPLAIN SELECT * from restaurant where count > 200 and town != 'tewsbury';
-- Results After Normalizing UUIDs
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury"))] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: count@0 > 200 AND town@3 != tewsbury |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2, projection=[count, system, time, town] |
-| | |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury"))] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: count@0 > 200 AND town@3 != tewsbury |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2, projection=[count, system, time, town] |
+| | |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where count > 200 and town != 'tewsbury' and (system =5 or town = 'lawrence');
-- Results After Sorting
+-------+--------+--------------------------------+-----------+
@@ -118,19 +118,19 @@
+-------+--------+--------------------------------+-----------+
-- SQL: EXPLAIN SELECT * from restaurant where count > 200 and town != 'tewsbury' and (system =5 or town = 'lawrence');
-- Results After Normalizing UUIDs
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND (restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: count@0 > 200 AND town@3 != tewsbury AND system@1 = 5 OR town@3 = lawrence |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")) AND (system = Float64(5) OR town = Dictionary(Int32, Utf8("lawrence"))), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2, projection=[count, system, time, town] |
-| | |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND (restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: count@0 > 200 AND town@3 != tewsbury AND system@1 = 5 OR town@3 = lawrence |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")) AND (system = Float64(5) OR town = Dictionary(Int32, Utf8("lawrence"))), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2, projection=[count, system, time, town] |
+| | |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where count > 200 and town != 'tewsbury' and (system =5 or town = 'lawrence') and count < 40000;
-- Results After Sorting
+-------+--------+--------------------------------+-----------+
@@ -142,19 +142,19 @@
+-------+--------+--------------------------------+-----------+
-- SQL: EXPLAIN SELECT * from restaurant where count > 200 and town != 'tewsbury' and (system =5 or town = 'lawrence') and count < 40000;
-- Results After Normalizing UUIDs
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND (restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))) AND restaurant.count < UInt64(40000) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence")), restaurant.count < UInt64(40000)] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: count@0 > 200 AND town@3 != tewsbury AND system@1 = 5 OR town@3 = lawrence AND count@0 < 40000 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")) AND (system = Float64(5) OR town = Dictionary(Int32, Utf8("lawrence"))) AND count < UInt64(40000), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2 AND count_min@5 < 40000, projection=[count, system, time, town] |
-| | |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND (restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))) AND restaurant.count < UInt64(40000) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence")), restaurant.count < UInt64(40000)] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: count@0 > 200 AND town@3 != tewsbury AND system@1 = 5 OR town@3 = lawrence AND count@0 < 40000 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")) AND (system = Float64(5) OR town = Dictionary(Int32, Utf8("lawrence"))) AND count < UInt64(40000), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2 AND count_min@5 < 40000, projection=[count, system, time, town] |
+| | |
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where count > 200 and count < 40000;
-- Results After Sorting
+-------+--------+--------------------------------+-----------+
@@ -168,19 +168,19 @@
+-------+--------+--------------------------------+-----------+
-- SQL: EXPLAIN SELECT * from restaurant where count > 200 and count < 40000;
-- Results After Normalizing UUIDs
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.count > UInt64(200) AND restaurant.count < UInt64(40000) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.count < UInt64(40000)] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: count@0 > 200 AND count@0 < 40000 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=count > UInt64(200) AND count < UInt64(40000), pruning_predicate=count_max@0 > 200 AND count_min@1 < 40000, projection=[count, system, time, town] |
-| | |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.count > UInt64(200) AND restaurant.count < UInt64(40000) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.count < UInt64(40000)] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: count@0 > 200 AND count@0 < 40000 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND count < UInt64(40000), pruning_predicate=count_max@0 > 200 AND count_min@1 < 40000, projection=[count, system, time, town] |
+| | |
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where system > 4.0 and system < 7.0;
-- Results After Sorting
+-------+--------+--------------------------------+-----------+
@@ -195,19 +195,19 @@
+-------+--------+--------------------------------+-----------+
-- SQL: EXPLAIN SELECT * from restaurant where system > 4.0 and system < 7.0;
-- Results After Normalizing UUIDs
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.system > Float64(4) AND restaurant.system < Float64(7) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4), restaurant.system < Float64(7)] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: system@1 > 4 AND system@1 < 7 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system > Float64(4) AND system < Float64(7), pruning_predicate=system_max@0 > 4 AND system_min@1 < 7, projection=[count, system, time, town] |
-| | |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.system > Float64(4) AND restaurant.system < Float64(7) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4), restaurant.system < Float64(7)] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: system@1 > 4 AND system@1 < 7 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(4) AND system < Float64(7), pruning_predicate=system_max@0 > 4 AND system_min@1 < 7, projection=[count, system, time, town] |
+| | |
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where system > 5.0 and system < 7.0;
-- Results After Sorting
+-------+--------+--------------------------------+----------+
@@ -219,19 +219,19 @@
+-------+--------+--------------------------------+----------+
-- SQL: EXPLAIN SELECT * from restaurant where system > 5.0 and system < 7.0;
-- Results After Normalizing UUIDs
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.system > Float64(5) AND restaurant.system < Float64(7) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.system < Float64(7)] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: system@1 > 5 AND system@1 < 7 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system > Float64(5) AND system < Float64(7), pruning_predicate=system_max@0 > 5 AND system_min@1 < 7, projection=[count, system, time, town] |
-| | |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.system > Float64(5) AND restaurant.system < Float64(7) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.system < Float64(7)] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: system@1 > 5 AND system@1 < 7 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND system < Float64(7), pruning_predicate=system_max@0 > 5 AND system_min@1 < 7, projection=[count, system, time, town] |
+| | |
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where system > 5.0 and town != 'tewsbury' and 7.0 > system;
-- Results After Sorting
+-------+--------+--------------------------------+----------+
@@ -242,19 +242,19 @@
+-------+--------+--------------------------------+----------+
-- SQL: EXPLAIN SELECT * from restaurant where system > 5.0 and town != 'tewsbury' and 7.0 > system;
-- Results After Normalizing UUIDs
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.system > Float64(5) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND Float64(7) > restaurant.system |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), Float64(7) > restaurant.system] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: system@1 > 5 AND town@3 != tewsbury AND 7 > system@1 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system > Float64(5) AND town != Dictionary(Int32, Utf8("tewsbury")) AND Float64(7) > system, pruning_predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7, projection=[count, system, time, town] |
-| | |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.system > Float64(5) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND Float64(7) > restaurant.system |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), Float64(7) > restaurant.system] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: system@1 > 5 AND town@3 != tewsbury AND 7 > system@1 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND town != Dictionary(Int32, Utf8("tewsbury")) AND Float64(7) > system, pruning_predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7, projection=[count, system, time, town] |
+| | |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where system > 5.0 and 'tewsbury' != town and system < 7.0 and (count = 632 or town = 'reading');
-- Results After Sorting
+-------+--------+--------------------------------+---------+
@@ -264,19 +264,19 @@
+-------+--------+--------------------------------+---------+
-- SQL: EXPLAIN SELECT * from restaurant where system > 5.0 and 'tewsbury' != town and system < 7.0 and (count = 632 or town = 'reading');
-- Results After Normalizing UUIDs
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.system > Float64(5) AND Dictionary(Int32, Utf8("tewsbury")) != restaurant.town AND restaurant.system < Float64(7) AND (restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading"))) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), Dictionary(Int32, Utf8("tewsbury")) != restaurant.town, restaurant.system < Float64(7), restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading"))] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: system@1 > 5 AND tewsbury != town@3 AND system@1 < 7 AND count@0 = 632 OR town@3 = reading |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system > Float64(5) AND Dictionary(Int32, Utf8("tewsbury")) != town AND system < Float64(7) AND (count = UInt64(632) OR town = Dictionary(Int32, Utf8("reading"))), pruning_predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7 AND count_min@4 <= 632 AND 632 <= count_max@5 OR town_min@1 <= reading AND reading <= town_max@2, projection=[count, system, time, town] |
-| | |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.system > Float64(5) AND Dictionary(Int32, Utf8("tewsbury")) != restaurant.town AND restaurant.system < Float64(7) AND (restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading"))) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), Dictionary(Int32, Utf8("tewsbury")) != restaurant.town, restaurant.system < Float64(7), restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading"))] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: system@1 > 5 AND tewsbury != town@3 AND system@1 < 7 AND count@0 = 632 OR town@3 = reading |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND Dictionary(Int32, Utf8("tewsbury")) != town AND system < Float64(7) AND (count = UInt64(632) OR town = Dictionary(Int32, Utf8("reading"))), pruning_predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7 AND count_min@4 <= 632 AND 632 <= count_max@5 OR town_min@1 <= reading AND reading <= town_max@2, projection=[count, system, time, town] |
+| | |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where 5.0 < system and town != 'tewsbury' and system < 7.0 and (count = 632 or town = 'reading') and time > to_timestamp('1970-01-01T00:00:00.000000130+00:00');
-- Results After Sorting
++
diff --git a/query_tests/cases/in/retention.expected b/query_tests/cases/in/retention.expected
index 7bfbcdec45..ec8856fb1f 100644
--- a/query_tests/cases/in/retention.expected
+++ b/query_tests/cases/in/retention.expected
@@ -9,30 +9,30 @@
+------+------+----------------------+
-- SQL: EXPLAIN SELECT * FROM cpu order by host, load, time;
-- Results After Normalizing UUIDs
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Sort: cpu.host ASC NULLS LAST, cpu.load ASC NULLS LAST, cpu.time ASC NULLS LAST |
-| | Projection: cpu.host, cpu.load, cpu.time |
-| | TableScan: cpu projection=[host, load, time] |
-| physical_plan | SortExec: [host@0 ASC NULLS LAST,load@1 ASC NULLS LAST,time@2 ASC NULLS LAST] |
-| | CoalescePartitionsExec |
-| | ProjectionExec: expr=[host@0 as host, load@1 as load, time@2 as time] |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [host@0 ASC,time@2 ASC] |
-| | SortPreservingMergeExec: [host@0 ASC,time@2 ASC] |
-| | SortExec: [host@0 ASC,time@2 ASC] |
-| | UnionExec |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] |
-| | |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Sort: cpu.host ASC NULLS LAST, cpu.load ASC NULLS LAST, cpu.time ASC NULLS LAST |
+| | Projection: cpu.host, cpu.load, cpu.time |
+| | TableScan: cpu projection=[host, load, time] |
+| physical_plan | SortExec: [host@0 ASC NULLS LAST,load@1 ASC NULLS LAST,time@2 ASC NULLS LAST] |
+| | CoalescePartitionsExec |
+| | ProjectionExec: expr=[host@0 as host, load@1 as load, time@2 as time] |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [host@0 ASC,time@2 ASC] |
+| | SortPreservingMergeExec: [host@0 ASC,time@2 ASC] |
+| | SortExec: [host@0 ASC,time@2 ASC] |
+| | UnionExec |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] |
+| | |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * FROM cpu WHERE host != 'b' ORDER BY host,time;
+------+------+----------------------+
| host | load | time |
@@ -42,30 +42,30 @@
+------+------+----------------------+
-- SQL: EXPLAIN SELECT * FROM cpu WHERE host != 'b' ORDER BY host,time;
-- Results After Normalizing UUIDs
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Sort: cpu.host ASC NULLS LAST, cpu.time ASC NULLS LAST |
-| | Projection: cpu.host, cpu.load, cpu.time |
-| | Filter: cpu.host != Dictionary(Int32, Utf8("b")) |
-| | TableScan: cpu projection=[host, load, time], partial_filters=[cpu.host != Dictionary(Int32, Utf8("b"))] |
-| physical_plan | SortExec: [host@0 ASC NULLS LAST,time@2 ASC NULLS LAST] |
-| | CoalescePartitionsExec |
-| | ProjectionExec: expr=[host@0 as host, load@1 as load, time@2 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: host@0 != b |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [host@0 ASC,time@2 ASC] |
-| | SortPreservingMergeExec: [host@0 ASC,time@2 ASC] |
-| | SortExec: [host@0 ASC,time@2 ASC] |
-| | UnionExec |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=host != Dictionary(Int32, Utf8("b")), pruning_predicate=host_min@0 != b OR b != host_max@1, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=host != Dictionary(Int32, Utf8("b")), pruning_predicate=host_min@0 != b OR b != host_max@1, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] |
-| | |
-+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Sort: cpu.host ASC NULLS LAST, cpu.time ASC NULLS LAST |
+| | Projection: cpu.host, cpu.load, cpu.time |
+| | Filter: cpu.host != Dictionary(Int32, Utf8("b")) |
+| | TableScan: cpu projection=[host, load, time], partial_filters=[cpu.host != Dictionary(Int32, Utf8("b"))] |
+| physical_plan | SortExec: [host@0 ASC NULLS LAST,time@2 ASC NULLS LAST] |
+| | CoalescePartitionsExec |
+| | ProjectionExec: expr=[host@0 as host, load@1 as load, time@2 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: host@0 != b |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [host@0 ASC,time@2 ASC] |
+| | SortPreservingMergeExec: [host@0 ASC,time@2 ASC] |
+| | SortExec: [host@0 ASC,time@2 ASC] |
+| | UnionExec |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=host != Dictionary(Int32, Utf8("b")), pruning_predicate=host_min@0 != b OR b != host_max@1, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=host != Dictionary(Int32, Utf8("b")), pruning_predicate=host_min@0 != b OR b != host_max@1, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] |
+| | |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/query_tests/cases/in/several_chunks.expected b/query_tests/cases/in/several_chunks.expected
index b1f8640a5a..62848cefb2 100644
--- a/query_tests/cases/in/several_chunks.expected
+++ b/query_tests/cases/in/several_chunks.expected
@@ -14,25 +14,25 @@
+---------+------------+-------+------+--------------------------------+
-- SQL: EXPLAIN SELECT * from h2o;
-- Results After Normalizing UUIDs
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time |
-| | TableScan: h2o projection=[city, other_temp, state, temp, time] |
-| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] |
-| | UnionExec |
-| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
-| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], projection=[city, other_temp, state, temp, time] |
-| | |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time |
+| | TableScan: h2o projection=[city, other_temp, state, temp, time] |
+| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] |
+| | UnionExec |
+| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
+| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, projection=[city, other_temp, state, temp, time] |
+| | |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: select temp, other_temp, time from h2o;
-- Results After Sorting
+------+------------+--------------------------------+
@@ -48,50 +48,50 @@
+------+------------+--------------------------------+
-- SQL: EXPLAIN select temp, other_temp, time from h2o;
-- Results After Normalizing UUIDs
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: h2o.temp, h2o.other_temp, h2o.time |
-| | TableScan: h2o projection=[other_temp, temp, time] |
-| physical_plan | ProjectionExec: expr=[temp@1 as temp, other_temp@0 as other_temp, time@2 as time] |
-| | UnionExec |
-| | ProjectionExec: expr=[other_temp@1 as other_temp, temp@3 as temp, time@4 as time] |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
-| | ProjectionExec: expr=[other_temp@1 as other_temp, temp@3 as temp, time@4 as time] |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], projection=[other_temp, temp, time] |
-| | |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: h2o.temp, h2o.other_temp, h2o.time |
+| | TableScan: h2o projection=[other_temp, temp, time] |
+| physical_plan | ProjectionExec: expr=[temp@1 as temp, other_temp@0 as other_temp, time@2 as time] |
+| | UnionExec |
+| | ProjectionExec: expr=[other_temp@1 as other_temp, temp@3 as temp, time@4 as time] |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
+| | ProjectionExec: expr=[other_temp@1 as other_temp, temp@3 as temp, time@4 as time] |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, projection=[other_temp, temp, time] |
+| | |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: EXPLAIN SELECT * from h2o where time >= to_timestamp('1970-01-01T00:00:00.000000250+00:00');
-- Results After Normalizing UUIDs
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time |
-| | Filter: h2o.time >= TimestampNanosecond(250, None) |
-| | TableScan: h2o projection=[city, other_temp, state, temp, time], partial_filters=[h2o.time >= TimestampNanosecond(250, None)] |
-| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: time@4 >= 250 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | UnionExec |
-| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=time >= TimestampNanosecond(250, None), pruning_predicate=time_max@0 >= 250, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=time >= TimestampNanosecond(250, None), pruning_predicate=time_max@0 >= 250, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
-| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], predicate=time >= TimestampNanosecond(250, None), pruning_predicate=time_max@0 >= 250, projection=[city, other_temp, state, temp, time] |
-| | |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time |
+| | Filter: h2o.time >= TimestampNanosecond(250, None) |
+| | TableScan: h2o projection=[city, other_temp, state, temp, time], partial_filters=[h2o.time >= TimestampNanosecond(250, None)] |
+| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: time@4 >= 250 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | UnionExec |
+| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=time >= TimestampNanosecond(250, None), pruning_predicate=time_max@0 >= 250, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=time >= TimestampNanosecond(250, None), pruning_predicate=time_max@0 >= 250, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
+| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, predicate=time >= TimestampNanosecond(250, None), pruning_predicate=time_max@0 >= 250, projection=[city, other_temp, state, temp, time] |
+| | |
++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/query_tests/cases/in/two_chunks.expected b/query_tests/cases/in/two_chunks.expected
index 661ffcefa0..33faba6e84 100644
--- a/query_tests/cases/in/two_chunks.expected
+++ b/query_tests/cases/in/two_chunks.expected
@@ -10,20 +10,20 @@
+--------+------------+-------+------+--------------------------------+
-- SQL: EXPLAIN SELECT * from h2o;
-- Results After Normalizing UUIDs
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time |
-| | TableScan: h2o projection=[city, other_temp, state, temp, time] |
-| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] |
-| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
-| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time |
+| | TableScan: h2o projection=[city, other_temp, state, temp, time] |
+| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] |
+| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
+| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: select temp, other_temp, time from h2o;
+------+------------+--------------------------------+
| temp | other_temp | time |
@@ -34,18 +34,18 @@
+------+------------+--------------------------------+
-- SQL: EXPLAIN select temp, other_temp, time from h2o;
-- Results After Normalizing UUIDs
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: h2o.temp, h2o.other_temp, h2o.time |
-| | TableScan: h2o projection=[other_temp, temp, time] |
-| physical_plan | ProjectionExec: expr=[temp@1 as temp, other_temp@0 as other_temp, time@2 as time] |
-| | ProjectionExec: expr=[other_temp@1 as other_temp, temp@3 as temp, time@4 as time] |
-| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
-| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: h2o.temp, h2o.other_temp, h2o.time |
+| | TableScan: h2o projection=[other_temp, temp, time] |
+| physical_plan | ProjectionExec: expr=[temp@1 as temp, other_temp@0 as other_temp, time@2 as time] |
+| | ProjectionExec: expr=[other_temp@1 as other_temp, temp@3 as temp, time@4 as time] |
+| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
+| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/query_tests/src/cases.rs b/query_tests/src/cases.rs
index 183ec69e51..f305fd4ee5 100644
--- a/query_tests/src/cases.rs
+++ b/query_tests/src/cases.rs
@@ -116,6 +116,22 @@ async fn test_cases_new_sql_system_tables_sql() {
.expect("flush worked");
}
+#[tokio::test]
+// Tests from "periods.sql",
+async fn test_cases_periods_sql() {
+ test_helpers::maybe_start_logging();
+
+ let input_path = Path::new("cases").join("in").join("periods.sql");
+ let mut runner = Runner::new();
+ runner
+ .run(input_path)
+ .await
+ .expect("test failed");
+ runner
+ .flush()
+ .expect("flush worked");
+}
+
#[tokio::test]
// Tests from "pushdown.sql",
async fn test_cases_pushdown_sql() {
diff --git a/query_tests/src/scenarios.rs b/query_tests/src/scenarios.rs
index fb477d868c..c299042f0b 100644
--- a/query_tests/src/scenarios.rs
+++ b/query_tests/src/scenarios.rs
@@ -66,6 +66,7 @@ pub fn get_all_setups() -> &'static HashMap<String, Arc<dyn DbSetup>> {
register_setup!(ManyFieldsSeveralChunks),
register_setup!(TwoChunksMissingColumns),
register_setup!(AllTypes),
+ register_setup!(PeriodsInNames),
register_setup!(TwoChunksDedupWeirdnessParquet),
register_setup!(TwoChunksDedupWeirdnessParquetIngester),
register_setup!(ThreeChunksWithRetention),
diff --git a/query_tests/src/table_schema.rs b/query_tests/src/table_schema.rs
index 7ef8d40b7e..748bb81d41 100644
--- a/query_tests/src/table_schema.rs
+++ b/query_tests/src/table_schema.rs
@@ -35,7 +35,7 @@ async fn run_table_schema_test_case<D>(
let ctx = db.new_query_context(None);
let chunks = db
- .chunks(table_name, &Default::default(), &None, ctx)
+ .chunks(table_name, &Default::default(), None, ctx)
.await
.expect("error getting chunks");
for chunk in chunks {
diff --git a/schema/src/lib.rs b/schema/src/lib.rs
index 9773ebb1e8..9e711dc0a3 100644
--- a/schema/src/lib.rs
+++ b/schema/src/lib.rs
@@ -375,7 +375,7 @@ impl Schema {
/// Return names of the columns of given indexes with all PK columns (tags and time)
/// If the columns are not provided, return all columns
- pub fn select_given_and_pk_columns(&self, cols: &Option<Vec<usize>>) -> Vec<String> {
+ pub fn select_given_and_pk_columns(&self, cols: Option<&Vec<usize>>) -> Vec<String> {
match cols {
Some(cols) => {
let mut columns = cols
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index 057447362a..153ec9fd8c 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -23,9 +23,10 @@ bitflags = { version = "1" }
byteorder = { version = "1", features = ["std"] }
bytes = { version = "1", features = ["std"] }
chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "iana-time-zone", "serde", "std", "winapi"] }
+clap = { version = "4", features = ["color", "derive", "env", "error-context", "help", "std", "suggestions", "usage"] }
crossbeam-utils = { version = "0.8", features = ["std"] }
crypto-common = { version = "0.1", default-features = false, features = ["std"] }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "799dd747152f6574638a844986b8ea8470d3f4d6", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fbadebb894672f61327a30f77cda2ee88a343b2a", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] }
digest = { version = "0.10", features = ["alloc", "block-buffer", "core-api", "mac", "std", "subtle"] }
either = { version = "1", features = ["use_std"] }
fixedbitset = { version = "0.4", features = ["std"] }
@@ -136,7 +137,6 @@ url = { version = "2" }
uuid = { version = "1", features = ["getrandom", "rng", "std", "v4"] }
[target.x86_64-unknown-linux-gnu.dependencies]
-io-lifetimes = { version = "1", features = ["close", "libc", "windows-sys"] }
once_cell = { version = "1", default-features = false, features = ["unstable"] }
rustix = { version = "0.36", features = ["fs", "io-lifetimes", "libc", "std", "termios", "use-libc-auxv"] }
@@ -144,7 +144,6 @@ rustix = { version = "0.36", features = ["fs", "io-lifetimes", "libc", "std", "t
once_cell = { version = "1", default-features = false, features = ["unstable"] }
[target.x86_64-apple-darwin.dependencies]
-io-lifetimes = { version = "1", features = ["close", "libc", "windows-sys"] }
once_cell = { version = "1", default-features = false, features = ["unstable"] }
rustix = { version = "0.36", features = ["fs", "io-lifetimes", "libc", "std", "termios", "use-libc-auxv"] }
@@ -152,7 +151,6 @@ rustix = { version = "0.36", features = ["fs", "io-lifetimes", "libc", "std", "t
once_cell = { version = "1", default-features = false, features = ["unstable"] }
[target.aarch64-apple-darwin.dependencies]
-io-lifetimes = { version = "1", features = ["close", "libc", "windows-sys"] }
once_cell = { version = "1", default-features = false, features = ["unstable"] }
rustix = { version = "0.36", features = ["fs", "io-lifetimes", "libc", "std", "termios", "use-libc-auxv"] }
|
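The `schema/src/lib.rs` hunk above changes `select_given_and_pk_columns` to accept `Option<&Vec<usize>>` instead of `&Option<Vec<usize>>`. A minimal, self-contained sketch of why that parameter shape is more ergonomic; the `select_columns` function and the column names here are illustrative stand-ins, not the crate's actual `Schema` API:

```rust
// Sketch only: `Option<&Vec<usize>>` lets callers pass `None` directly,
// while callers that own an `Option<Vec<usize>>` just borrow it with `.as_ref()`.
fn select_columns(all: &[String], cols: Option<&Vec<usize>>) -> Vec<String> {
    match cols {
        // Keep only the requested indexes (out-of-range indexes are skipped here).
        Some(cols) => cols.iter().filter_map(|&i| all.get(i).cloned()).collect(),
        // No selection supplied: return every column.
        None => all.to_vec(),
    }
}

fn main() {
    let all = vec!["city".to_string(), "state".to_string(), "time".to_string()];

    // Borrowing an owned Option with `.as_ref()`.
    let owned: Option<Vec<usize>> = Some(vec![0, 2]);
    assert_eq!(select_columns(&all, owned.as_ref()), vec!["city", "time"]);

    // With the old `&Option<Vec<usize>>` signature this caller had to write `&None`.
    assert_eq!(select_columns(&all, None).len(), 3);
}
```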
d60e4d58238f2ba99b5e600aacd45feaf0b96000
|
Carol (Nichols || Goulding)
|
2023-04-18 05:57:02
|
Delete delete parsing code from router (#7573)
|
And return the "deletes unsupported" error sooner.
|
Co-authored-by: Dom <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
feat: Delete delete parsing code from router (#7573)
And return the "deletes unsupported" error sooner.
Co-authored-by: Dom <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/router/src/dml_handlers/chain.rs b/router/src/dml_handlers/chain.rs
index c661a64a3d..13c5d29ff3 100644
--- a/router/src/dml_handlers/chain.rs
+++ b/router/src/dml_handlers/chain.rs
@@ -1,5 +1,5 @@
use async_trait::async_trait;
-use data_types::{DeletePredicate, NamespaceId, NamespaceName};
+use data_types::{NamespaceId, NamespaceName};
use trace::ctx::SpanContext;
use super::{DmlError, DmlHandler};
@@ -53,7 +53,6 @@ where
// All errors are converted into DML errors before returning to the caller
// in order to present a consistent error type for chained handlers.
type WriteError = DmlError;
- type DeleteError = DmlError;
/// Write `batches` to `namespace`.
async fn write(
@@ -74,30 +73,4 @@ where
.await
.map_err(Into::into)
}
-
- /// Delete the data specified in `delete`.
- async fn delete(
- &self,
- namespace: &NamespaceName<'static>,
- namespace_id: NamespaceId,
- table_name: &str,
- predicate: &DeletePredicate,
- span_ctx: Option<SpanContext>,
- ) -> Result<(), Self::DeleteError> {
- self.first
- .delete(
- namespace,
- namespace_id,
- table_name,
- predicate,
- span_ctx.clone(),
- )
- .await
- .map_err(Into::into)?;
-
- self.second
- .delete(namespace, namespace_id, table_name, predicate, span_ctx)
- .await
- .map_err(Into::into)
- }
}
diff --git a/router/src/dml_handlers/fan_out.rs b/router/src/dml_handlers/fan_out.rs
index dd501a02f8..3e561c64ea 100644
--- a/router/src/dml_handlers/fan_out.rs
+++ b/router/src/dml_handlers/fan_out.rs
@@ -1,7 +1,7 @@
use std::{fmt::Debug, marker::PhantomData};
use async_trait::async_trait;
-use data_types::{DeletePredicate, NamespaceId, NamespaceName};
+use data_types::{NamespaceId, NamespaceName};
use futures::{stream::FuturesUnordered, TryStreamExt};
use trace::ctx::SpanContext;
@@ -42,7 +42,6 @@ where
type WriteInput = I;
type WriteOutput = ();
type WriteError = T::WriteError;
- type DeleteError = T::DeleteError;
/// Concurrently execute the write inputs in `input` against the inner
/// handler, returning early and aborting in-flight writes if an error
@@ -70,18 +69,4 @@ where
.await?;
Ok(())
}
-
- /// Pass the delete through to the inner handler.
- async fn delete(
- &self,
- namespace: &NamespaceName<'static>,
- namespace_id: NamespaceId,
- table_name: &str,
- predicate: &DeletePredicate,
- span_ctx: Option<SpanContext>,
- ) -> Result<(), Self::DeleteError> {
- self.inner
- .delete(namespace, namespace_id, table_name, predicate, span_ctx)
- .await
- }
}
diff --git a/router/src/dml_handlers/instrumentation.rs b/router/src/dml_handlers/instrumentation.rs
index ff93ecc9ab..4d769c5621 100644
--- a/router/src/dml_handlers/instrumentation.rs
+++ b/router/src/dml_handlers/instrumentation.rs
@@ -1,11 +1,8 @@
use async_trait::async_trait;
-use data_types::{DeletePredicate, NamespaceId, NamespaceName};
+use data_types::{NamespaceId, NamespaceName};
use iox_time::{SystemProvider, TimeProvider};
use metric::{DurationHistogram, Metric};
-use trace::{
- ctx::SpanContext,
- span::{SpanExt, SpanRecorder},
-};
+use trace::{ctx::SpanContext, span::SpanRecorder};
use super::DmlHandler;
@@ -20,9 +17,6 @@ pub struct InstrumentationDecorator<T, P = SystemProvider> {
write_success: DurationHistogram,
write_error: DurationHistogram,
-
- delete_success: DurationHistogram,
- delete_error: DurationHistogram,
}
impl<T> InstrumentationDecorator<T> {
@@ -31,25 +25,16 @@ impl<T> InstrumentationDecorator<T> {
pub fn new(name: &'static str, registry: &metric::Registry, inner: T) -> Self {
let write: Metric<DurationHistogram> =
registry.register_metric("dml_handler_write_duration", "write handler call duration");
- let delete: Metric<DurationHistogram> = registry.register_metric(
- "dml_handler_delete_duration",
- "delete handler call duration",
- );
let write_success = write.recorder(&[("handler", name), ("result", "success")]);
let write_error = write.recorder(&[("handler", name), ("result", "error")]);
- let delete_success = delete.recorder(&[("handler", name), ("result", "success")]);
- let delete_error = delete.recorder(&[("handler", name), ("result", "error")]);
-
Self {
name,
inner,
time_provider: Default::default(),
write_success,
write_error,
- delete_success,
- delete_error,
}
}
}
@@ -61,7 +46,6 @@ where
{
type WriteInput = T::WriteInput;
type WriteError = T::WriteError;
- type DeleteError = T::DeleteError;
type WriteOutput = T::WriteOutput;
/// Call the inner `write` method and record the call latency.
@@ -100,43 +84,6 @@ where
res
}
-
- /// Call the inner `delete` method and record the call latency.
- async fn delete(
- &self,
- namespace: &NamespaceName<'static>,
- namespace_id: NamespaceId,
- table_name: &str,
- predicate: &DeletePredicate,
- span_ctx: Option<SpanContext>,
- ) -> Result<(), Self::DeleteError> {
- let t = self.time_provider.now();
-
- // Create a tracing span for this handler.
- let mut span_recorder = SpanRecorder::new(span_ctx.child_span(self.name));
-
- let res = self
- .inner
- .delete(namespace, namespace_id, table_name, predicate, span_ctx)
- .await;
-
- // Avoid exploding if time goes backwards - simply drop the measurement
- // if it happens.
- if let Some(delta) = self.time_provider.now().checked_duration_since(t) {
- match &res {
- Ok(_) => {
- span_recorder.ok("success");
- self.delete_success.record(delta)
- }
- Err(e) => {
- span_recorder.error(e.to_string());
- self.delete_error.record(delta)
- }
- };
- }
-
- res
- }
}
#[cfg(test)]
@@ -144,7 +91,6 @@ mod tests {
use std::sync::Arc;
use assert_matches::assert_matches;
- use data_types::TimestampRange;
use metric::Attributes;
use trace::{span::SpanStatus, RingBufferTraceCollector, TraceCollector};
@@ -234,57 +180,4 @@ mod tests {
assert_metric_hit(&metrics, "dml_handler_write_duration", "error");
assert_trace(traces, SpanStatus::Err);
}
-
- #[tokio::test]
- async fn test_delete_ok() {
- let ns = "platanos".try_into().unwrap();
- let handler = Arc::new(MockDmlHandler::<()>::default().with_delete_return([Ok(())]));
-
- let metrics = Arc::new(metric::Registry::default());
- let traces: Arc<dyn TraceCollector> = Arc::new(RingBufferTraceCollector::new(5));
- let span = SpanContext::new(Arc::clone(&traces));
-
- let decorator = InstrumentationDecorator::new(HANDLER_NAME, &metrics, handler);
-
- let pred = DeletePredicate {
- range: TimestampRange::new(1, 2),
- exprs: vec![],
- };
-
- decorator
- .delete(&ns, NamespaceId::new(42), "a table", &pred, Some(span))
- .await
- .expect("inner handler configured to succeed");
-
- assert_metric_hit(&metrics, "dml_handler_delete_duration", "success");
- assert_trace(traces, SpanStatus::Ok);
- }
-
- #[tokio::test]
- async fn test_delete_err() {
- let ns = "platanos".try_into().unwrap();
- let handler = Arc::new(
- MockDmlHandler::<()>::default()
- .with_delete_return([Err(DmlError::NamespaceNotFound("nope".to_owned()))]),
- );
-
- let metrics = Arc::new(metric::Registry::default());
- let traces: Arc<dyn TraceCollector> = Arc::new(RingBufferTraceCollector::new(5));
- let span = SpanContext::new(Arc::clone(&traces));
-
- let decorator = InstrumentationDecorator::new(HANDLER_NAME, &metrics, handler);
-
- let pred = DeletePredicate {
- range: TimestampRange::new(1, 2),
- exprs: vec![],
- };
-
- decorator
- .delete(&ns, NamespaceId::new(42), "a table", &pred, Some(span))
- .await
- .expect_err("inner handler configured to fail");
-
- assert_metric_hit(&metrics, "dml_handler_delete_duration", "error");
- assert_trace(traces, SpanStatus::Err);
- }
}
diff --git a/router/src/dml_handlers/mock.rs b/router/src/dml_handlers/mock.rs
index a2856e0a46..9fdf74895a 100644
--- a/router/src/dml_handlers/mock.rs
+++ b/router/src/dml_handlers/mock.rs
@@ -1,7 +1,7 @@
use std::{collections::VecDeque, fmt::Debug};
use async_trait::async_trait;
-use data_types::{DeletePredicate, NamespaceId, NamespaceName};
+use data_types::{NamespaceId, NamespaceName};
use parking_lot::Mutex;
use trace::ctx::SpanContext;
@@ -16,19 +16,12 @@ pub enum MockDmlHandlerCall<W> {
namespace_id: NamespaceId,
write_input: W,
},
- Delete {
- namespace: String,
- namespace_id: NamespaceId,
- table: String,
- predicate: DeletePredicate,
- },
}
#[derive(Debug)]
struct Inner<W> {
calls: Vec<MockDmlHandlerCall<W>>,
write_return: VecDeque<Result<(), DmlError>>,
- delete_return: VecDeque<Result<(), DmlError>>,
}
impl<W> Default for Inner<W> {
@@ -36,7 +29,6 @@ impl<W> Default for Inner<W> {
Self {
calls: Default::default(),
write_return: Default::default(),
- delete_return: Default::default(),
}
}
}
@@ -65,11 +57,6 @@ where
self
}
- pub fn with_delete_return(self, ret: impl Into<VecDeque<Result<(), DmlError>>>) -> Self {
- self.0.lock().delete_return = ret.into();
- self
- }
-
pub fn calls(&self) -> Vec<MockDmlHandlerCall<W>> {
self.0.lock().calls.clone()
}
@@ -93,7 +80,6 @@ where
W: Debug + Send + Sync,
{
type WriteError = DmlError;
- type DeleteError = DmlError;
type WriteInput = W;
type WriteOutput = ();
@@ -114,24 +100,4 @@ where
write_return
)
}
-
- async fn delete(
- &self,
- namespace: &NamespaceName<'static>,
- namespace_id: NamespaceId,
- table_name: &str,
- predicate: &DeletePredicate,
- _span_ctx: Option<SpanContext>,
- ) -> Result<(), Self::DeleteError> {
- record_and_return!(
- self,
- MockDmlHandlerCall::Delete {
- namespace: namespace.into(),
- namespace_id,
- table: table_name.to_owned(),
- predicate: predicate.clone(),
- },
- delete_return
- )
- }
}
diff --git a/router/src/dml_handlers/nop.rs b/router/src/dml_handlers/nop.rs
index bdd09a2b09..472ea652e0 100644
--- a/router/src/dml_handlers/nop.rs
+++ b/router/src/dml_handlers/nop.rs
@@ -3,7 +3,7 @@
use std::{fmt::Debug, marker::PhantomData};
use async_trait::async_trait;
-use data_types::{DeletePredicate, NamespaceId, NamespaceName};
+use data_types::{NamespaceId, NamespaceName};
use observability_deps::tracing::*;
use trace::ctx::SpanContext;
@@ -25,7 +25,6 @@ where
T: Debug + Send + Sync,
{
type WriteError = DmlError;
- type DeleteError = DmlError;
type WriteInput = T;
type WriteOutput = T;
@@ -39,16 +38,4 @@ where
info!(%namespace, %namespace_id, ?batches, "dropping write operation");
Ok(batches)
}
-
- async fn delete(
- &self,
- namespace: &NamespaceName<'static>,
- namespace_id: NamespaceId,
- table_name: &str,
- predicate: &DeletePredicate,
- _span_ctx: Option<SpanContext>,
- ) -> Result<(), Self::DeleteError> {
- info!(%namespace, %namespace_id, %table_name, ?predicate, "dropping delete operation");
- Ok(())
- }
}
diff --git a/router/src/dml_handlers/partitioner.rs b/router/src/dml_handlers/partitioner.rs
index c6aeea61a8..f0a031d70f 100644
--- a/router/src/dml_handlers/partitioner.rs
+++ b/router/src/dml_handlers/partitioner.rs
@@ -1,7 +1,5 @@
use async_trait::async_trait;
-use data_types::{
- DeletePredicate, NamespaceId, NamespaceName, PartitionKey, PartitionTemplate, TableId,
-};
+use data_types::{NamespaceId, NamespaceName, PartitionKey, PartitionTemplate, TableId};
use hashbrown::HashMap;
use mutable_batch::{MutableBatch, PartitionWrite, WritePayload};
use observability_deps::tracing::*;
@@ -64,7 +62,6 @@ impl Partitioner {
#[async_trait]
impl DmlHandler for Partitioner {
type WriteError = PartitionError;
- type DeleteError = PartitionError;
type WriteInput = HashMap<TableId, (String, MutableBatch)>;
type WriteOutput = Vec<Partitioned<Self::WriteInput>>;
@@ -104,18 +101,6 @@ impl DmlHandler for Partitioner {
.map(|(key, batch)| Partitioned::new(key, batch))
.collect::<Vec<_>>())
}
-
- /// Pass the delete request through unmodified to the next handler.
- async fn delete(
- &self,
- _namespace: &NamespaceName<'static>,
- _namespace_id: NamespaceId,
- _table_name: &str,
- _predicate: &DeletePredicate,
- _span_ctx: Option<SpanContext>,
- ) -> Result<(), Self::DeleteError> {
- Ok(())
- }
}
#[cfg(test)]
diff --git a/router/src/dml_handlers/retention_validation.rs b/router/src/dml_handlers/retention_validation.rs
index 0e426be1c9..963b3ab543 100644
--- a/router/src/dml_handlers/retention_validation.rs
+++ b/router/src/dml_handlers/retention_validation.rs
@@ -1,5 +1,5 @@
use async_trait::async_trait;
-use data_types::{DeletePredicate, NamespaceId, NamespaceName};
+use data_types::{NamespaceId, NamespaceName};
use hashbrown::HashMap;
use iox_time::{SystemProvider, TimeProvider};
use mutable_batch::MutableBatch;
@@ -53,7 +53,6 @@ where
C: NamespaceCache<ReadError = iox_catalog::interface::Error>, // The handler expects the cache to read from the catalog if necessary.
{
type WriteError = RetentionError;
- type DeleteError = RetentionError;
type WriteInput = HashMap<String, MutableBatch>;
type WriteOutput = Self::WriteInput;
@@ -87,18 +86,6 @@ where
Ok(batch)
}
-
- /// Pass the delete request through unmodified to the next handler.
- async fn delete(
- &self,
- _namespace: &NamespaceName<'static>,
- _namespace_id: NamespaceId,
- _table_name: &str,
- _predicate: &DeletePredicate,
- _span_ctx: Option<SpanContext>,
- ) -> Result<(), Self::DeleteError> {
- Ok(())
- }
}
#[cfg(test)]
diff --git a/router/src/dml_handlers/rpc_write.rs b/router/src/dml_handlers/rpc_write.rs
index 7d7d47b92f..9fc0e75561 100644
--- a/router/src/dml_handlers/rpc_write.rs
+++ b/router/src/dml_handlers/rpc_write.rs
@@ -16,7 +16,7 @@ use self::{
use super::{DmlHandler, Partitioned};
use async_trait::async_trait;
-use data_types::{DeletePredicate, NamespaceId, NamespaceName, TableId};
+use data_types::{NamespaceId, NamespaceName, TableId};
use dml::{DmlMeta, DmlWrite};
use generated_types::influxdata::iox::ingester::v1::WriteRequest;
use hashbrown::HashMap;
@@ -51,10 +51,6 @@ pub enum RpcWriteError {
#[error("upstream {0} is not connected")]
UpstreamNotConnected(String),
- /// A delete request was rejected (not supported).
- #[error("deletes are not supported")]
- DeletesUnsupported,
-
/// The write request was not attempted, because not enough upstream
/// ingesters needed to satisfy the configured replication factor are
/// healthy.
@@ -176,7 +172,6 @@ where
type WriteOutput = Vec<DmlMeta>;
type WriteError = RpcWriteError;
- type DeleteError = RpcWriteError;
async fn write(
&self,
@@ -265,24 +260,6 @@ where
Ok(vec![op.meta().clone()])
}
-
- async fn delete(
- &self,
- namespace: &NamespaceName<'static>,
- namespace_id: NamespaceId,
- table_name: &str,
- _predicate: &DeletePredicate,
- _span_ctx: Option<SpanContext>,
- ) -> Result<(), RpcWriteError> {
- warn!(
- %namespace,
- %namespace_id,
- %table_name,
- "dropping delete request"
- );
-
- Err(RpcWriteError::DeletesUnsupported)
- }
}
/// Perform an RPC write with `req` against one of the upstream ingesters in
diff --git a/router/src/dml_handlers/rpc_write/circuit_breaking_client.rs b/router/src/dml_handlers/rpc_write/circuit_breaking_client.rs
index 7ed818e7a5..044e71281a 100644
--- a/router/src/dml_handlers/rpc_write/circuit_breaking_client.rs
+++ b/router/src/dml_handlers/rpc_write/circuit_breaking_client.rs
@@ -195,7 +195,7 @@ mod tests {
async fn test_observe() {
let circuit_breaker = Arc::new(MockCircuitBreaker::default());
let mock_client = Arc::new(MockWriteClient::default().with_ret(Box::new(
- [Ok(()), Err(RpcWriteError::DeletesUnsupported)].into_iter(),
+ [Ok(()), Err(RpcWriteError::NoUpstreams)].into_iter(),
)));
let wrapper = CircuitBreakingClient::new(Arc::clone(&mock_client), "bananas")
.with_circuit_breaker(Arc::clone(&circuit_breaker));
diff --git a/router/src/dml_handlers/rpc_write/lazy_connector.rs b/router/src/dml_handlers/rpc_write/lazy_connector.rs
index 3361cd95b5..ff4cc3d712 100644
--- a/router/src/dml_handlers/rpc_write/lazy_connector.rs
+++ b/router/src/dml_handlers/rpc_write/lazy_connector.rs
@@ -128,7 +128,6 @@ fn is_envoy_unavailable_error(e: &RpcWriteError) -> bool {
| RpcWriteError::Timeout(_)
| RpcWriteError::NoUpstreams
| RpcWriteError::UpstreamNotConnected(_)
- | RpcWriteError::DeletesUnsupported
| RpcWriteError::PartialWrite { .. }
| RpcWriteError::NotEnoughReplicas => false,
}
diff --git a/router/src/dml_handlers/schema_validation.rs b/router/src/dml_handlers/schema_validation.rs
index 86736a7692..ed2938f333 100644
--- a/router/src/dml_handlers/schema_validation.rs
+++ b/router/src/dml_handlers/schema_validation.rs
@@ -1,7 +1,7 @@
use std::{ops::DerefMut, sync::Arc};
use async_trait::async_trait;
-use data_types::{DeletePredicate, NamespaceId, NamespaceName, NamespaceSchema, TableId};
+use data_types::{NamespaceId, NamespaceName, NamespaceSchema, TableId};
use hashbrown::HashMap;
use iox_catalog::{
interface::{Catalog, Error as CatalogError},
@@ -145,7 +145,6 @@ where
C: NamespaceCache<ReadError = iox_catalog::interface::Error>, // The handler expects the cache to read from the catalog if necessary.
{
type WriteError = SchemaError;
- type DeleteError = SchemaError;
// Accepts a map of TableName -> MutableBatch
type WriteInput = HashMap<String, MutableBatch>;
@@ -315,19 +314,6 @@ where
Ok(batches)
}
-
- /// This call is passed through to `D` - no schema validation is performed
- /// on deletes.
- async fn delete(
- &self,
- _namespace: &NamespaceName<'static>,
- _namespace_id: NamespaceId,
- _table_name: &str,
- _predicate: &DeletePredicate,
- _span_ctx: Option<SpanContext>,
- ) -> Result<(), Self::DeleteError> {
- Ok(())
- }
}
/// An error returned by schema limit evaluation against a cached
@@ -461,7 +447,7 @@ mod tests {
use std::sync::Arc;
use assert_matches::assert_matches;
- use data_types::{ColumnType, TimestampRange};
+ use data_types::ColumnType;
use iox_tests::{TestCatalog, TestNamespace};
use once_cell::sync::Lazy;
@@ -956,29 +942,4 @@ mod tests {
assert_matches!(err, SchemaError::ServiceLimit(_));
assert_eq!(1, handler.service_limit_hit_columns.fetch());
}
-
- #[tokio::test]
- async fn test_write_delete_passthrough_ok() {
- const NAMESPACE: &str = "NAMESPACE_IS_NOT_VALIDATED";
- const TABLE: &str = "bananas";
-
- let (catalog, _namespace) = test_setup().await;
- let metrics = Arc::new(metric::Registry::default());
- let handler = SchemaValidator::new(catalog.catalog(), setup_test_cache(&catalog), &metrics);
-
- let predicate = DeletePredicate {
- range: TimestampRange::new(1, 2),
- exprs: vec![],
- };
-
- let ns = NamespaceName::try_from(NAMESPACE).unwrap();
-
- handler
- .delete(&ns, NamespaceId::new(42), TABLE, &predicate, None)
- .await
- .expect("request should succeed");
-
- // Deletes have no effect on the cache.
- assert!(handler.cache.get_schema(&ns).await.is_err());
- }
}
diff --git a/router/src/dml_handlers/trait.rs b/router/src/dml_handlers/trait.rs
index eafc8088ed..1039d2d268 100644
--- a/router/src/dml_handlers/trait.rs
+++ b/router/src/dml_handlers/trait.rs
@@ -2,7 +2,7 @@ use super::{
partitioner::PartitionError, retention_validation::RetentionError, RpcWriteError, SchemaError,
};
use async_trait::async_trait;
-use data_types::{DeletePredicate, NamespaceId, NamespaceName};
+use data_types::{NamespaceId, NamespaceName};
use std::{error::Error, fmt::Debug, sync::Arc};
use thiserror::Error;
use trace::ctx::SpanContext;
@@ -57,9 +57,6 @@ pub trait DmlHandler: Debug + Send + Sync {
/// All errors must be mappable into the concrete [`DmlError`] type.
type WriteError: Error + Into<DmlError> + Send;
- /// The error type of the delete handler.
- type DeleteError: Error + Into<DmlError> + Send;
-
/// Write `batches` to `namespace`.
async fn write(
&self,
@@ -68,16 +65,6 @@ pub trait DmlHandler: Debug + Send + Sync {
input: Self::WriteInput,
span_ctx: Option<SpanContext>,
) -> Result<Self::WriteOutput, Self::WriteError>;
-
- /// Delete the data specified in `delete`.
- async fn delete(
- &self,
- namespace: &NamespaceName<'static>,
- namespace_id: NamespaceId,
- table_name: &str,
- predicate: &DeletePredicate,
- span_ctx: Option<SpanContext>,
- ) -> Result<(), Self::DeleteError>;
}
#[async_trait]
@@ -88,7 +75,6 @@ where
type WriteInput = T::WriteInput;
type WriteOutput = T::WriteOutput;
type WriteError = T::WriteError;
- type DeleteError = T::DeleteError;
async fn write(
&self,
@@ -101,18 +87,4 @@ where
.write(namespace, namespace_id, input, span_ctx)
.await
}
-
- /// Delete the data specified in `delete`.
- async fn delete(
- &self,
- namespace: &NamespaceName<'static>,
- namespace_id: NamespaceId,
- table_name: &str,
- predicate: &DeletePredicate,
- span_ctx: Option<SpanContext>,
- ) -> Result<(), Self::DeleteError> {
- (**self)
- .delete(namespace, namespace_id, table_name, predicate, span_ctx)
- .await
- }
}
diff --git a/router/src/server/http.rs b/router/src/server/http.rs
index fd5440b17c..20c5dc8087 100644
--- a/router/src/server/http.rs
+++ b/router/src/server/http.rs
@@ -1,6 +1,5 @@
//! HTTP service implementations for `router`.
-mod delete_predicate;
pub mod write;
use authz::{Action, Authorizer, Permission, Resource};
@@ -13,19 +12,15 @@ use metric::{DurationHistogram, U64Counter};
use mutable_batch::MutableBatch;
use mutable_batch_lp::LinesConverter;
use observability_deps::tracing::*;
-use predicate::delete_predicate::parse_delete_predicate;
use server_util::authorization::AuthorizationHeaderExtension;
use std::{str::Utf8Error, sync::Arc, time::Instant};
use thiserror::Error;
use tokio::sync::{Semaphore, TryAcquireError};
use trace::ctx::SpanContext;
-use self::{
- delete_predicate::parse_http_delete_request,
- write::{
- multi_tenant::MultiTenantExtractError, single_tenant::SingleTenantExtractError,
- WriteParamExtractor, WriteParams,
- },
+use self::write::{
+ multi_tenant::MultiTenantExtractError, single_tenant::SingleTenantExtractError,
+ WriteParamExtractor, WriteParams,
};
use crate::{
dml_handlers::{
@@ -41,6 +36,10 @@ pub enum Error {
#[error("not found")]
NoHandler,
+ /// A delete request was rejected (not supported).
+ #[error("deletes are not supported")]
+ DeletesUnsupported,
+
/// An error parsing a single-tenant HTTP request.
#[error(transparent)]
SingleTenantError(#[from] SingleTenantExtractError),
@@ -77,14 +76,6 @@ pub enum Error {
#[error("failed to parse line protocol: {0}")]
ParseLineProtocol(mutable_batch_lp::Error),
- /// Failure to parse the request delete predicate.
- #[error("failed to parse delete predicate: {0}")]
- ParseDelete(#[from] predicate::delete_predicate::Error),
-
- /// Failure to parse the delete predicate in the http request
- #[error("failed to parse delete predicate from http request: {0}")]
- ParseHttpDelete(#[from] self::delete_predicate::Error),
-
/// An error returned from the [`DmlHandler`].
#[error("dml handler error: {0}")]
DmlHandler(#[from] DmlError),
@@ -120,13 +111,12 @@ impl Error {
pub fn as_status_code(&self) -> StatusCode {
match self {
Error::NoHandler => StatusCode::NOT_FOUND,
+ Error::DeletesUnsupported => StatusCode::NOT_IMPLEMENTED,
Error::ClientHangup(_) => StatusCode::BAD_REQUEST,
Error::InvalidGzip(_) => StatusCode::BAD_REQUEST,
Error::NonUtf8ContentHeader(_) => StatusCode::BAD_REQUEST,
Error::NonUtf8Body(_) => StatusCode::BAD_REQUEST,
Error::ParseLineProtocol(_) => StatusCode::BAD_REQUEST,
- Error::ParseDelete(_) => StatusCode::BAD_REQUEST,
- Error::ParseHttpDelete(_) => StatusCode::BAD_REQUEST,
Error::RequestSizeExceeded(_) => StatusCode::PAYLOAD_TOO_LARGE,
Error::InvalidContentEncoding(_) => {
// https://www.rfc-editor.org/rfc/rfc7231#section-6.5.13
@@ -185,7 +175,6 @@ impl From<&DmlError> for StatusCode {
}
DmlError::Retention(RetentionError::OutsideRetention(_)) => StatusCode::FORBIDDEN,
DmlError::RpcWrite(RpcWriteError::Upstream(_)) => StatusCode::INTERNAL_SERVER_ERROR,
- DmlError::RpcWrite(RpcWriteError::DeletesUnsupported) => StatusCode::NOT_IMPLEMENTED,
DmlError::RpcWrite(RpcWriteError::Timeout(_)) => StatusCode::GATEWAY_TIMEOUT,
DmlError::RpcWrite(
RpcWriteError::NoUpstreams
@@ -226,7 +215,6 @@ pub struct HttpDelegate<D, N, T = SystemProvider> {
write_metric_fields: U64Counter,
write_metric_tables: U64Counter,
write_metric_body_size: U64Counter,
- delete_metric_body_size: U64Counter,
request_limit_rejected: U64Counter,
}
@@ -269,12 +257,6 @@ impl<D, N> HttpDelegate<D, N, SystemProvider> {
"cumulative byte size of successfully routed (decompressed) line protocol write requests",
)
.recorder(&[]);
- let delete_metric_body_size = metrics
- .register_metric::<U64Counter>(
- "http_delete_body_bytes",
- "cumulative byte size of successfully routed (decompressed) delete requests",
- )
- .recorder(&[]);
let request_limit_rejected = metrics
.register_metric::<U64Counter>(
"http_request_limit_rejected",
@@ -301,7 +283,6 @@ impl<D, N> HttpDelegate<D, N, SystemProvider> {
write_metric_fields,
write_metric_tables,
write_metric_body_size,
- delete_metric_body_size,
request_limit_rejected,
}
}
@@ -343,7 +324,7 @@ where
let dml_info = self.write_param_extractor.parse_v2(&req)?;
self.write_handler(req, dml_info).await
}
- (&Method::POST, "/api/v2/delete") => self.delete_handler(req).await,
+ (&Method::POST, "/api/v2/delete") => return Err(Error::DeletesUnsupported),
_ => return Err(Error::NoHandler),
}
.map(|_summary| {
@@ -440,55 +421,6 @@ where
Ok(())
}
- async fn delete_handler(&self, req: Request<Body>) -> Result<(), Error> {
- let span_ctx: Option<SpanContext> = req.extensions().get().cloned();
- let write_info = self.write_param_extractor.parse_v2(&req)?;
-
- trace!(namespace=%write_info.namespace, "processing delete request");
-
- // Read the HTTP body and convert it to a str.
- let body = self.read_body(req).await?;
- let body = std::str::from_utf8(&body).map_err(Error::NonUtf8Body)?;
-
- // Parse and extract table name (which can be empty), start, stop, and predicate
- let parsed_delete = parse_http_delete_request(body)?;
- let predicate = parse_delete_predicate(
- &parsed_delete.start_time,
- &parsed_delete.stop_time,
- &parsed_delete.predicate,
- )?;
-
- debug!(
- table_name=%parsed_delete.table_name,
- predicate = %parsed_delete.predicate,
- start=%parsed_delete.start_time,
- stop=%parsed_delete.stop_time,
- body_size=body.len(),
- namespace=%write_info.namespace,
- "routing delete"
- );
-
- let namespace_id = self
- .namespace_resolver
- .get_namespace_id(&write_info.namespace)
- .await?;
-
- self.dml_handler
- .delete(
- &write_info.namespace,
- namespace_id,
- parsed_delete.table_name.as_str(),
- &predicate,
- span_ctx,
- )
- .await
- .map_err(Into::into)?;
-
- self.delete_metric_body_size.inc(body.len() as _);
-
- Ok(())
- }
-
/// Parse the request's body into raw bytes, applying the configured size
/// limits and decoding any content encoding.
async fn read_body(&self, req: hyper::Request<Body>) -> Result<Bytes, Error> {
@@ -687,7 +619,6 @@ mod tests {
.with_mapping(NAMESPACE_NAME, NAMESPACE_ID);
let dml_handler = Arc::new(MockDmlHandler::default()
.with_write_return($dml_write_handler)
- .with_delete_return($dml_delete_handler)
);
let metrics = Arc::new(metric::Registry::default());
let delegate = HttpDelegate::new(
@@ -773,30 +704,6 @@ mod tests {
};
}
- // Wrapper over test_http_handler specifically for delete requests.
- macro_rules! test_delete_handler {
- (
- $name:ident,
- query_string = $query_string:expr, // Request URI query string
- body = $body:expr, // Request body content
- dml_handler = $dml_handler:expr, // DML delete handler response (if called)
- want_result = $want_result:pat,
- want_dml_calls = $($want_dml_calls:tt )+
- ) => {
- paste::paste! {
- test_http_handler!(
- [<delete_ $name>],
- uri = format!("https://bananas.example/api/v2/delete{}", $query_string),
- body = $body,
- dml_write_handler = [],
- dml_delete_handler = $dml_handler,
- want_result = $want_result,
- want_dml_calls = $($want_dml_calls)+
- );
- }
- };
- }
-
test_write_handler!(
ok,
query_string = "?org=bananas&bucket=test",
@@ -1029,118 +936,6 @@ mod tests {
want_dml_calls = []
);
- test_delete_handler!(
- ok,
- query_string = "?org=bananas&bucket=test",
- body = r#"{"start":"2021-04-01T14:00:00Z","stop":"2021-04-02T14:00:00Z", "predicate":"_measurement=its_a_table and location=Boston"}"#.as_bytes(),
- dml_handler = [Ok(())],
- want_result = Ok(_),
- want_dml_calls = [MockDmlHandlerCall::Delete{namespace, namespace_id, table, predicate}] => {
- assert_eq!(table, "its_a_table");
- assert_eq!(namespace, NAMESPACE_NAME);
- assert_eq!(*namespace_id, NAMESPACE_ID);
- assert!(!predicate.exprs.is_empty());
- }
- );
-
- test_delete_handler!(
- invalid_delete_body,
- query_string = "?org=bananas&bucket=test",
- body = r#"{wat}"#.as_bytes(),
- dml_handler = [],
- want_result = Err(Error::ParseHttpDelete(_)),
- want_dml_calls = []
- );
-
- test_delete_handler!(
- no_query_params,
- query_string = "",
- body = "".as_bytes(),
- dml_handler = [Ok(())],
- want_result = Err(Error::MultiTenantError(
- MultiTenantExtractError::ParseV2Request(V2WriteParseError::NoQueryParams)
- )),
- want_dml_calls = [] // None
- );
-
- test_delete_handler!(
- no_org_bucket,
- query_string = "?",
- body = "".as_bytes(),
- dml_handler = [Ok(())],
- want_result = Err(Error::MultiTenantError(
- MultiTenantExtractError::InvalidOrgAndBucket(
- OrgBucketMappingError::NoOrgBucketSpecified
- )
- )),
- want_dml_calls = [] // None
- );
-
- test_delete_handler!(
- empty_org_bucket,
- query_string = "?org=&bucket=",
- body = "".as_bytes(),
- dml_handler = [Ok(())],
- want_result = Err(Error::MultiTenantError(
- MultiTenantExtractError::InvalidOrgAndBucket(
- OrgBucketMappingError::NoOrgBucketSpecified
- )
- )),
- want_dml_calls = [] // None
- );
-
- test_delete_handler!(
- invalid_org_bucket,
- query_string = format!("?org=test&bucket={}", "A".repeat(1000)),
- body = "".as_bytes(),
- dml_handler = [Ok(())],
- want_result = Err(Error::MultiTenantError(
- MultiTenantExtractError::InvalidOrgAndBucket(
- OrgBucketMappingError::InvalidNamespaceName(
- NamespaceNameError::LengthConstraint { .. }
- )
- )
- )),
- want_dml_calls = [] // None
- );
-
- test_delete_handler!(
- non_utf8_body,
- query_string = "?org=bananas&bucket=test",
- body = vec![0xc3, 0x28],
- dml_handler = [Ok(())],
- want_result = Err(Error::NonUtf8Body(_)),
- want_dml_calls = [] // None
- );
-
- test_delete_handler!(
- db_not_found,
- query_string = "?org=bananas&bucket=test",
- body = r#"{"start":"2021-04-01T14:00:00Z","stop":"2021-04-02T14:00:00Z", "predicate":"_measurement=its_a_table and location=Boston"}"#.as_bytes(),
- dml_handler = [Err(DmlError::NamespaceNotFound(NAMESPACE_NAME.to_string()))],
- want_result = Err(Error::DmlHandler(DmlError::NamespaceNotFound(_))),
- want_dml_calls = [MockDmlHandlerCall::Delete{namespace, namespace_id, table, predicate}] => {
- assert_eq!(table, "its_a_table");
- assert_eq!(namespace, NAMESPACE_NAME);
- assert_eq!(*namespace_id, NAMESPACE_ID);
- assert!(!predicate.exprs.is_empty());
- }
- );
-
- test_delete_handler!(
- dml_handler_error,
- query_string = "?org=bananas&bucket=test",
- body = r#"{"start":"2021-04-01T14:00:00Z","stop":"2021-04-02T14:00:00Z", "predicate":"_measurement=its_a_table and location=Boston"}"#.as_bytes(),
- dml_handler = [Err(DmlError::Internal("💣".into()))],
- want_result = Err(Error::DmlHandler(DmlError::Internal(_))),
- want_dml_calls = [MockDmlHandlerCall::Delete{namespace, namespace_id, table, predicate}] => {
- assert_eq!(table, "its_a_table");
- assert_eq!(namespace, NAMESPACE_NAME);
- assert_eq!(*namespace_id, NAMESPACE_ID);
- assert!(!predicate.exprs.is_empty());
- }
- );
-
test_http_handler!(
not_found,
uri = "https://bananas.example/wat",
@@ -1429,11 +1224,7 @@ mod tests {
let mock_namespace_resolver =
MockNamespaceResolver::default().with_mapping(NAMESPACE_NAME, NamespaceId::new(42));
- let dml_handler = Arc::new(
- MockDmlHandler::default()
- .with_write_return([Ok(())])
- .with_delete_return([]),
- );
+ let dml_handler = Arc::new(MockDmlHandler::default().with_write_return([Ok(())]));
let metrics = Arc::new(metric::Registry::default());
let authz = Arc::new(MockAuthorizer {});
let delegate = HttpDelegate::new(
@@ -1511,11 +1302,7 @@ mod tests {
let mock_namespace_resolver =
MockNamespaceResolver::default().with_mapping(NAMESPACE_NAME, NamespaceId::new(42));
- let dml_handler = Arc::new(
- MockDmlHandler::default()
- .with_write_return([Ok(())])
- .with_delete_return([]),
- );
+ let dml_handler = Arc::new(MockDmlHandler::default().with_write_return([Ok(())]));
let metrics = Arc::new(metric::Registry::default());
let delegate = HttpDelegate::new(
MAX_BYTES,
@@ -1560,11 +1347,8 @@ mod tests {
}),
));
- let dml_handler = Arc::new(
- MockDmlHandler::default()
- .with_write_return([Ok(()), Ok(()), Ok(())])
- .with_delete_return([]),
- );
+ let dml_handler =
+ Arc::new(MockDmlHandler::default().with_write_return([Ok(()), Ok(()), Ok(())]));
let metrics = Arc::new(metric::Registry::default());
let delegate = HttpDelegate::new(
MAX_BYTES,
@@ -1611,24 +1395,6 @@ mod tests {
request_parser.calls().as_slice(),
[MockExtractorCall::V1, MockExtractorCall::V2]
);
-
- // Delete requests hit the v2 parser
- let request = Request::builder()
- .uri("https://bananas.example/api/v2/delete")
- .method("POST")
- .body(Body::from(""))
- .unwrap();
- let _got = delegate.route(request).await;
-
- // The delete should have hit v2.
- assert_matches!(
- request_parser.calls().as_slice(),
- [
- MockExtractorCall::V1,
- MockExtractorCall::V2,
- MockExtractorCall::V2
- ]
- );
}
// The display text of Error gets passed through `ioxd_router::IoxHttpErrorAdaptor` then
@@ -1682,6 +1448,11 @@ mod tests {
"not found",
),
+ (
+ DeletesUnsupported,
+ "deletes are not supported",
+ ),
+
(
NonUtf8Body(std::str::from_utf8(&[0, 159]).unwrap_err()),
"body content is not valid utf8: invalid utf-8 sequence of 1 bytes from index 1",
@@ -1760,21 +1531,6 @@ mod tests {
"failed to parse line protocol: timestamp overflows i64",
),
- (
- ParseDelete({
- predicate::delete_predicate::Error::InvalidSyntax { value: "[syntax]".into() }
- }),
- "failed to parse delete predicate: Invalid predicate syntax: ([syntax])",
- ),
-
- (
- ParseHttpDelete({
- delete_predicate::Error::TableInvalid { value: "[table name]".into() }
- }),
- "failed to parse delete predicate from http request: \
- Invalid table name in delete '[table name]'"
- ),
-
(
DmlHandler(DmlError::NamespaceNotFound("[namespace name]".into())),
"dml handler error: namespace [namespace name] does not exist",
diff --git a/router/src/server/http/delete_predicate.rs b/router/src/server/http/delete_predicate.rs
deleted file mode 100644
index e8bba66450..0000000000
--- a/router/src/server/http/delete_predicate.rs
+++ /dev/null
@@ -1,231 +0,0 @@
-use snafu::{ResultExt, Snafu};
-
-/// Parse Delete Predicates
-/// Parse Error
-#[derive(Debug, Snafu)]
-pub enum Error {
- #[snafu(display(r#"Unable to parse delete string '{}'"#, value))]
- Invalid {
- source: serde_json::Error,
- value: String,
- },
-
- #[snafu(display(
- r#"Invalid key which is either 'start', 'stop', or 'predicate': '{}'"#,
- value
- ))]
- KeywordInvalid { value: String },
-
- #[snafu(display(r#"Invalid timestamp or predicate value: '{}'"#, value))]
- ValueInvalid { value: String },
-
- #[snafu(display(r#"Invalid JSON format of delete string '{}'"#, value))]
- ObjectInvalid { value: String },
-
- #[snafu(display(r#"Invalid table name in delete '{}'"#, value))]
- TableInvalid { value: String },
-
- #[snafu(display(r#"Delete must include a start time and a stop time'{}'"#, value))]
- StartStopInvalid { value: String },
-}
-
-/// Result type for Parser Cient
-pub(crate) type Result<T, E = Error> = std::result::Result<T, E>;
-
-const FLUX_TABLE: &str = "_measurement";
-
-/// Data of a parsed delete
-///
-/// Note that this struct and its functions are used to parse FLUX DELETE,
-/// <https://docs.influxdata.com/influxdb/v2.0/write-data/delete-data/>, which happens before
-/// the parsing of timestamps and sql predicate. The examples below will show FLUX DELETE's syntax which is
-/// different from SQL syntax so we need this extra parsing step before invoking sqlparser to parse the
-/// sql-format predicates and timestamps
-///
-#[derive(Debug, Default, PartialEq, Eq, Clone)]
-pub(crate) struct HttpDeleteRequest {
- /// Empty string, "", if no table specified
- pub(crate) table_name: String,
- pub(crate) start_time: String,
- pub(crate) stop_time: String,
- pub(crate) predicate: String,
-}
-
-/// Return parsed data of an influx delete:
-/// A few input examples and their parsed results:
-/// {"predicate":"_measurement=mytable AND host=\"Orient.local\"","start":"1970-01-01T00:00:00Z","stop":"2070-01-02T00:00:00Z"}
-/// => table_name="mytable", start_time="1970-01-01T00:00:00Z", end_time="2070-01-02T00:00:00Z", predicate="host=\"Orient.local\"""
-/// {"predicate":"host=Orient.local and val != 50","start":"1970-01-01T00:00:00Z","stop":"2070-01-02T00:00:00Z"}
-/// => start_time="1970-01-01T00:00:00Z", end_time="2070-01-02T00:00:00Z", predicate="host=Orient.local and val != 50"
-///
-pub(crate) fn parse_http_delete_request(input: &str) -> Result<HttpDeleteRequest> {
- let parsed_obj: serde_json::Value =
- serde_json::from_str(input).context(InvalidSnafu { value: input })?;
- let mut parsed_delete = HttpDeleteRequest::default();
-
- if let serde_json::Value::Object(items) = parsed_obj {
- for item in items {
- // The value must be type String
- if let Some(val) = item.1.as_str() {
- match item.0.to_lowercase().as_str() {
- "start" => parsed_delete.start_time = val.to_string(),
- "stop" => parsed_delete.stop_time = val.to_string(),
- "predicate" => parsed_delete.predicate = val.to_string(),
- _ => {
- return Err(Error::KeywordInvalid {
- value: input.to_string(),
- })
- }
- }
- } else {
- return Err(Error::ValueInvalid {
- value: input.to_string(),
- });
- }
- }
- } else {
- return Err(Error::ObjectInvalid {
- value: input.to_string(),
- });
- }
-
- // Start or stop is empty
- if parsed_delete.start_time.is_empty() || parsed_delete.stop_time.is_empty() {
- return Err(Error::StartStopInvalid {
- value: input.to_string(),
- });
- }
-
- // Extract table from the predicate if any
- if parsed_delete.predicate.contains(FLUX_TABLE) {
- // since predicate is a conjunctive expression, split them by "and"
- let predicate = parsed_delete
- .predicate
- .replace(" AND ", " and ")
- .replace(" ANd ", " and ")
- .replace(" And ", " and ")
- .replace(" AnD ", " and ");
-
- let split: Vec<&str> = predicate.split("and").collect();
-
- let mut predicate_no_table = "".to_string();
- for s in split {
- if s.contains(FLUX_TABLE) {
- // This should be in form "_measurement = <your_table_name>"
- // only <keep your_table_name> by replacing the rest with ""
- let table_name = s
- .replace(FLUX_TABLE, "")
- .replace('=', "")
- .trim()
- .to_string();
- // Do not support white spaces in table name
- if table_name.contains(' ') {
- return Err(Error::TableInvalid {
- value: input.to_string(),
- });
- }
- parsed_delete.table_name = table_name;
- } else {
- // This is a normal column comparison, put it back to send to sqlparser later
- if !predicate_no_table.is_empty() {
- predicate_no_table.push_str(" and ")
- }
- predicate_no_table.push_str(s.trim());
- }
- }
- parsed_delete.predicate = predicate_no_table;
- }
-
- Ok(parsed_delete)
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- #[test]
- fn test_parse_http_delete_full() {
- let delete_str = r#"{"predicate":"_measurement=mytable AND host=\"Orient.local\"","start":"1970-01-01T00:00:00Z","stop":"2070-01-02T00:00:00Z"}"#;
-
- let expected = HttpDeleteRequest {
- table_name: "mytable".to_string(),
- predicate: "host=\"Orient.local\"".to_string(),
- start_time: "1970-01-01T00:00:00Z".to_string(),
- stop_time: "2070-01-02T00:00:00Z".to_string(),
- };
-
- let result = parse_http_delete_request(delete_str).unwrap();
- assert_eq!(expected, result);
- }
-
- #[test]
- fn test_parse_http_delete_no_table() {
- let delete_str = r#"{"start":"1970-01-01T00:00:00Z","stop":"2070-01-02T00:00:00Z", "predicate":"host=\"Orient.local\""}"#;
-
- let expected = HttpDeleteRequest {
- table_name: "".to_string(),
- predicate: "host=\"Orient.local\"".to_string(),
- start_time: "1970-01-01T00:00:00Z".to_string(),
- stop_time: "2070-01-02T00:00:00Z".to_string(),
- };
-
- let result = parse_http_delete_request(delete_str).unwrap();
- assert_eq!(expected, result);
- }
-
- #[test]
- fn test_parse_http_delete_empty_predicate() {
- let delete_str =
- r#"{"start":"1970-01-01T00:00:00Z","predicate":"","stop":"2070-01-02T00:00:00Z"}"#;
-
- let expected = HttpDeleteRequest {
- table_name: "".to_string(),
- predicate: "".to_string(),
- start_time: "1970-01-01T00:00:00Z".to_string(),
- stop_time: "2070-01-02T00:00:00Z".to_string(),
- };
-
- let result = parse_http_delete_request(delete_str).unwrap();
- assert_eq!(expected, result);
- }
-
- #[test]
- fn test_parse_http_delete_no_predicate() {
- let delete_str = r#"{"start":"1970-01-01T00:00:00Z","stop":"2070-01-02T00:00:00Z"}"#;
-
- let expected = HttpDeleteRequest {
- table_name: "".to_string(),
- predicate: "".to_string(),
- start_time: "1970-01-01T00:00:00Z".to_string(),
- stop_time: "2070-01-02T00:00:00Z".to_string(),
- };
-
- let result = parse_http_delete_request(delete_str).unwrap();
- assert_eq!(expected, result);
- }
-
- #[test]
- fn test_parse_http_delete_negative() {
- // invalid key
- let delete_str = r#"{"invalid":"1970-01-01T00:00:00Z","stop":"2070-01-02T00:00:00Z"}"#;
- let result = parse_http_delete_request(delete_str);
- let err = result.unwrap_err();
- assert!(err
- .to_string()
- .contains("Invalid key which is either 'start', 'stop', or 'predicate'"));
-
- // invalid timestamp value
- let delete_str = r#"{"start":123,"stop":"2070-01-02T00:00:00Z"}"#;
- let result = parse_http_delete_request(delete_str);
- let err = result.unwrap_err();
- assert!(err
- .to_string()
- .contains("Invalid timestamp or predicate value"));
-
- // invalid JSON
- let delete_str = r#"{"start":"1970-01-01T00:00:00Z",;"stop":"2070-01-02T00:00:00Z"}"#;
- let result = parse_http_delete_request(delete_str);
- let err = result.unwrap_err();
- assert!(err.to_string().contains("Unable to parse delete string"));
- }
-}
diff --git a/router/tests/http.rs b/router/tests/http.rs
index 1e93d2966e..cb1203c43c 100644
--- a/router/tests/http.rs
+++ b/router/tests/http.rs
@@ -8,7 +8,7 @@ use hyper::{Body, Request, StatusCode};
use iox_catalog::interface::SoftDeletedRows;
use iox_time::{SystemProvider, TimeProvider};
use metric::{Attributes, DurationHistogram, Metric, U64Counter};
-use router::dml_handlers::{DmlError, RetentionError, RpcWriteError, SchemaError};
+use router::dml_handlers::{DmlError, RetentionError, SchemaError};
use std::sync::Arc;
pub mod common;
@@ -384,14 +384,10 @@ async fn test_delete_unsupported() {
assert_matches!(
&err,
- e @ router::server::http::Error::DmlHandler(
- DmlError::RpcWrite(
- RpcWriteError::DeletesUnsupported
- )
- ) => {
+ e @ router::server::http::Error::DeletesUnsupported => {
assert_eq!(
e.to_string(),
- "dml handler error: deletes are not supported"
+ "deletes are not supported"
);
}
);
|
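A minimal, self-contained sketch of the approach taken in the commit above: the `/api/v2/delete` route is rejected immediately at the HTTP layer with a `DeletesUnsupported` error (surfaced as HTTP 501) rather than being parsed and passed through the DML handler chain. The `Method`, `Error`, and `route` items below are illustrative stand-ins, not the router's actual hyper-based types:

```rust
// Sketch only: reject deletes at routing time, before any body parsing.
enum Method {
    Post,
}

#[derive(Debug, PartialEq)]
enum Error {
    NoHandler,
    DeletesUnsupported,
}

impl Error {
    // Mirrors the status mapping in the diff: unsupported deletes map to 501.
    fn as_status_code(&self) -> u16 {
        match self {
            Error::NoHandler => 404,
            Error::DeletesUnsupported => 501,
        }
    }
}

fn route(method: Method, path: &str) -> Result<(), Error> {
    match (method, path) {
        // Writes would be dispatched to the write handler in the real router.
        (Method::Post, "/api/v2/write") => Ok(()),
        // Deletes are rejected up front instead of reaching the DML handlers.
        (Method::Post, "/api/v2/delete") => Err(Error::DeletesUnsupported),
        _ => Err(Error::NoHandler),
    }
}

fn main() {
    let err = route(Method::Post, "/api/v2/delete").unwrap_err();
    assert_eq!(err, Error::DeletesUnsupported);
    assert_eq!(err.as_status_code(), 501);
}
```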
fc5697b8e7ff480e48d6b3d656a62d300647c191
|
Andrew Lamb
|
2022-11-28 12:09:40
|
Update datafusion again (N of N) (#6218)
|
* chore: Update datafusion again (4 of N)
* fix: Update plans
* fix: Update for renamed API
* fix: Update more plans
* chore: Update to datafusion @ d355f69aae2cc951cfd021e5c0b690861ba0c4ac
* fix: update explain plan tests
* fix: update test after schema error
* chore: Update datafusion again
* fix: Add size() calculation to selectors
* chore: Run cargo hakari tasks
* fix: Update newly added test
|
Co-authored-by: CircleCI[bot] <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
chore: Update datafusion again (N of N) (#6218)
* chore: Update datafusion again (4 of N)
* fix: Update plans
* fix: Update for renamed API
* fix: Update more plans
* chore: Update to datafusion @ d355f69aae2cc951cfd021e5c0b690861ba0c4ac
* fix: update explain plan tests
* fix: update test after schema error
* chore: Update datafusion again
* fix: Add size() calculation to selectors
* chore: Run cargo hakari tasks
* fix: Update newly added test
Co-authored-by: CircleCI[bot] <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/Cargo.lock b/Cargo.lock
index ecd291aa30..6b1f4241e9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -329,6 +329,7 @@ dependencies = [
"memchr",
"pin-project-lite",
"tokio",
+ "xz2",
]
[[package]]
@@ -1236,7 +1237,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=ebb24c5bf46f2af362aebffba2012875b328e799#ebb24c5bf46f2af362aebffba2012875b328e799"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=a61615b2949bea9027eefe686613605e135780f2#a61615b2949bea9027eefe686613605e135780f2"
dependencies = [
"ahash 0.8.2",
"arrow",
@@ -1255,7 +1256,7 @@ dependencies = [
"flate2",
"futures",
"glob",
- "hashbrown 0.12.3",
+ "hashbrown 0.13.1",
"itertools",
"lazy_static",
"log",
@@ -1268,43 +1269,44 @@ dependencies = [
"pin-project-lite",
"rand",
"smallvec",
- "sqlparser 0.26.0",
+ "sqlparser",
"tempfile",
"tokio",
"tokio-stream",
"tokio-util",
"url",
"uuid",
+ "xz2",
]
[[package]]
name = "datafusion-common"
version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=ebb24c5bf46f2af362aebffba2012875b328e799#ebb24c5bf46f2af362aebffba2012875b328e799"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=a61615b2949bea9027eefe686613605e135780f2#a61615b2949bea9027eefe686613605e135780f2"
dependencies = [
"arrow",
"chrono",
"object_store",
"parquet",
- "sqlparser 0.26.0",
+ "sqlparser",
]
[[package]]
name = "datafusion-expr"
version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=ebb24c5bf46f2af362aebffba2012875b328e799#ebb24c5bf46f2af362aebffba2012875b328e799"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=a61615b2949bea9027eefe686613605e135780f2#a61615b2949bea9027eefe686613605e135780f2"
dependencies = [
"ahash 0.8.2",
"arrow",
"datafusion-common",
"log",
- "sqlparser 0.26.0",
+ "sqlparser",
]
[[package]]
name = "datafusion-optimizer"
version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=ebb24c5bf46f2af362aebffba2012875b328e799#ebb24c5bf46f2af362aebffba2012875b328e799"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=a61615b2949bea9027eefe686613605e135780f2#a61615b2949bea9027eefe686613605e135780f2"
dependencies = [
"arrow",
"async-trait",
@@ -1312,14 +1314,14 @@ dependencies = [
"datafusion-common",
"datafusion-expr",
"datafusion-physical-expr",
- "hashbrown 0.12.3",
+ "hashbrown 0.13.1",
"log",
]
[[package]]
name = "datafusion-physical-expr"
version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=ebb24c5bf46f2af362aebffba2012875b328e799#ebb24c5bf46f2af362aebffba2012875b328e799"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=a61615b2949bea9027eefe686613605e135780f2#a61615b2949bea9027eefe686613605e135780f2"
dependencies = [
"ahash 0.8.2",
"arrow",
@@ -1332,7 +1334,7 @@ dependencies = [
"datafusion-expr",
"datafusion-row",
"half 2.1.0",
- "hashbrown 0.12.3",
+ "hashbrown 0.13.1",
"itertools",
"lazy_static",
"md-5",
@@ -1348,7 +1350,7 @@ dependencies = [
[[package]]
name = "datafusion-proto"
version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=ebb24c5bf46f2af362aebffba2012875b328e799#ebb24c5bf46f2af362aebffba2012875b328e799"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=a61615b2949bea9027eefe686613605e135780f2#a61615b2949bea9027eefe686613605e135780f2"
dependencies = [
"arrow",
"datafusion",
@@ -1362,7 +1364,7 @@ dependencies = [
[[package]]
name = "datafusion-row"
version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=ebb24c5bf46f2af362aebffba2012875b328e799#ebb24c5bf46f2af362aebffba2012875b328e799"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=a61615b2949bea9027eefe686613605e135780f2#a61615b2949bea9027eefe686613605e135780f2"
dependencies = [
"arrow",
"datafusion-common",
@@ -1373,12 +1375,12 @@ dependencies = [
[[package]]
name = "datafusion-sql"
version = "14.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=ebb24c5bf46f2af362aebffba2012875b328e799#ebb24c5bf46f2af362aebffba2012875b328e799"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=a61615b2949bea9027eefe686613605e135780f2#a61615b2949bea9027eefe686613605e135780f2"
dependencies = [
"arrow",
"datafusion-common",
"datafusion-expr",
- "sqlparser 0.26.0",
+ "sqlparser",
]
[[package]]
@@ -2395,7 +2397,7 @@ version = "0.1.0"
dependencies = [
"generated_types",
"snafu",
- "sqlparser 0.27.0",
+ "sqlparser",
"workspace-hack",
]
@@ -3066,6 +3068,17 @@ dependencies = [
"libc",
]
+[[package]]
+name = "lzma-sys"
+version = "0.1.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5fda04ab3764e6cde78b9974eec4f779acaba7c4e84b36eca3cf77c581b85d27"
+dependencies = [
+ "cc",
+ "libc",
+ "pkg-config",
+]
+
[[package]]
name = "matchers"
version = "0.1.0"
@@ -3903,7 +3916,7 @@ dependencies = [
"query_functions",
"schema",
"snafu",
- "sqlparser 0.27.0",
+ "sqlparser",
"test_helpers",
"workspace-hack",
]
@@ -5052,15 +5065,6 @@ dependencies = [
"unicode_categories",
]
-[[package]]
-name = "sqlparser"
-version = "0.26.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86be66ea0b2b22749cfa157d16e2e84bf793e626a3375f4d378dc289fa03affb"
-dependencies = [
- "log",
-]
-
[[package]]
name = "sqlparser"
version = "0.27.0"
@@ -6288,7 +6292,6 @@ dependencies = [
name = "workspace-hack"
version = "0.1.0"
dependencies = [
- "ahash 0.7.6",
"ahash 0.8.2",
"arrow",
"base64",
@@ -6304,6 +6307,7 @@ dependencies = [
"either",
"fixedbitset",
"flatbuffers",
+ "flate2",
"futures-channel",
"futures-core",
"futures-io",
@@ -6416,6 +6420,15 @@ dependencies = [
"workspace-hack",
]
+[[package]]
+name = "xz2"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "388c44dc09d76f1536602ead6d325eb532f5c122f17782bd57fb47baeeb767e2"
+dependencies = [
+ "lzma-sys",
+]
+
[[package]]
name = "yaml-rust"
version = "0.4.5"
diff --git a/Cargo.toml b/Cargo.toml
index 49d95fdb14..b62da9cf3d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -113,8 +113,8 @@ license = "MIT OR Apache-2.0"
[workspace.dependencies]
arrow = { version = "27.0.0" }
arrow-flight = { version = "27.0.0" }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="ebb24c5bf46f2af362aebffba2012875b328e799", default-features = false }
-datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="ebb24c5bf46f2af362aebffba2012875b328e799" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="a61615b2949bea9027eefe686613605e135780f2", default-features = false }
+datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="a61615b2949bea9027eefe686613605e135780f2" }
hashbrown = { version = "0.13.1" }
parquet = { version = "27.0.0" }
diff --git a/iox_query/src/frontend.rs b/iox_query/src/frontend.rs
index d442b448c0..a289660429 100644
--- a/iox_query/src/frontend.rs
+++ b/iox_query/src/frontend.rs
@@ -314,7 +314,7 @@ mod test {
if !(self.pred)(plan) {
return Ok(true);
}
- let metrics = plan.metrics().unwrap().aggregate_by_partition();
+ let metrics = plan.metrics().unwrap().aggregate_by_name();
let mut elapsed_compute: Option<metrics::Time> = None;
let mut output_rows: Option<metrics::Count> = None;
let mut start_timestamp: Option<metrics::Timestamp> = None;
diff --git a/query_functions/src/selectors.rs b/query_functions/src/selectors.rs
index 5feb4087be..48020f5c4a 100644
--- a/query_functions/src/selectors.rs
+++ b/query_functions/src/selectors.rs
@@ -393,6 +393,12 @@ trait Selector: Debug + Default + Send + Sync {
/// Update this selector's state based on values in value_arr and time_arr
fn update_batch(&mut self, value_arr: &ArrayRef, time_arr: &ArrayRef) -> DataFusionResult<()>;
+
+ /// Allocated size required for this selector, in bytes,
+ /// including `Self`. Allocated means that for internal
+ /// containers such as `Vec`, the `capacity` should be used not
+ /// the `len`
+ fn size(&self) -> usize;
}
/// Describes which part of the selector to return: the timestamp or
@@ -527,6 +533,14 @@ where
self.selector.datafusion_state()
}
+ /// Allocated size required for this accumulator, in bytes,
+ /// including `Self`. Allocated means that for internal
+ /// containers such as `Vec`, the `capacity` should be used not
+ /// the `len`
+ fn size(&self) -> usize {
+ std::mem::size_of_val(self) - std::mem::size_of_val(&self.selector) + self.selector.size()
+ }
+
// Return the final value of this aggregator.
fn evaluate(&self) -> DataFusionResult<ScalarValue> {
self.selector.evaluate(&self.output)
diff --git a/query_functions/src/selectors/internal.rs b/query_functions/src/selectors/internal.rs
index 25beff73e2..bfe5b3c570 100644
--- a/query_functions/src/selectors/internal.rs
+++ b/query_functions/src/selectors/internal.rs
@@ -225,6 +225,11 @@ macro_rules! make_first_selector {
Ok(())
}
+
+ fn size(&self) -> usize {
+ // no nested types
+ std::mem::size_of_val(self)
+ }
}
};
}
@@ -335,6 +340,11 @@ macro_rules! make_last_selector {
Ok(())
}
+
+ fn size(&self) -> usize {
+ // no nested types
+ std::mem::size_of_val(self)
+ }
}
};
}
@@ -476,6 +486,11 @@ macro_rules! make_min_selector {
}
Ok(())
}
+
+ fn size(&self) -> usize {
+ // no nested types
+ std::mem::size_of_val(self)
+ }
}
};
}
@@ -593,6 +608,11 @@ macro_rules! make_max_selector {
}
Ok(())
}
+
+ fn size(&self) -> usize {
+ // no nested types
+ std::mem::size_of_val(self)
+ }
}
};
}
diff --git a/query_tests/cases/in/dedup_and_predicates_parquet.expected b/query_tests/cases/in/dedup_and_predicates_parquet.expected
index aa4926c2a5..80b4c85b52 100644
--- a/query_tests/cases/in/dedup_and_predicates_parquet.expected
+++ b/query_tests/cases/in/dedup_and_predicates_parquet.expected
@@ -33,23 +33,23 @@
+-----+-----+-----+----------------------+
-- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A';
-- Results After Normalizing UUIDs
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
-| | Filter: table.tag = Dictionary(Int32, Utf8("A")) |
-| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] |
-| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: tag@2 = A |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
-| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.tag = Dictionary(Int32, Utf8("A")) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] |
+| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: tag@2 = A |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * FROM "table" WHERE foo=1 AND bar=2;
+-----+-----+-----+----------------------+
| bar | foo | tag | time |
@@ -84,26 +84,26 @@
+-----+-----+-----+----------------------+
-- SQL: EXPLAIN SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag;
-- Results After Normalizing UUIDs
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Sort: table.tag ASC NULLS LAST |
-| | Projection: table.bar, table.foo, table.tag, table.time |
-| | Filter: table.time = TimestampNanosecond(0, None) |
-| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.time = TimestampNanosecond(0, None)] |
-| physical_plan | SortExec: [tag@2 ASC NULLS LAST] |
-| | CoalescePartitionsExec |
-| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: time@3 = 0 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
-| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Sort: table.tag ASC NULLS LAST |
+| | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.time = TimestampNanosecond(0, None) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.time = TimestampNanosecond(0, None)] |
+| physical_plan | SortExec: [tag@2 ASC NULLS LAST] |
+| | CoalescePartitionsExec |
+| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: time@3 = 0 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=time = TimestampNanosecond(0, None), pruning_predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=time = TimestampNanosecond(0, None), pruning_predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00');
+-----+-----+-----+----------------------+
| bar | foo | tag | time |
@@ -112,20 +112,20 @@
+-----+-----+-----+----------------------+
-- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00');
-- Results After Normalizing UUIDs
-+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
-| | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) |
-| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] |
-| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
-| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | |
-+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] |
+| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
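The expected plans above now print both the pushed-down row filter (`predicate=tag = Dictionary(Int32, Utf8("A"))`) and the statistics-based `pruning_predicate=tag_min@0 <= A AND A <= tag_max@1`. A hedged sketch of what the pruning form evaluates against per-row-group min/max statistics; the types below are illustrative, not the DataFusion pruning API:

    // Row-group statistics for one column, as tracked in parquet metadata.
    struct ColumnStats<'a> {
        min: &'a str,
        max: &'a str,
    }

    // tag = "A" can only match rows in a row group whose [min, max] range
    // contains "A"; otherwise the whole group is skipped without reading it.
    fn may_contain(stats: &ColumnStats<'_>, value: &str) -> bool {
        stats.min <= value && value <= stats.max
    }

    fn main() {
        let pruned = ColumnStats { min: "B", max: "Z" };
        let kept = ColumnStats { min: "A", max: "C" };
        assert!(!may_contain(&pruned, "A")); // row group skipped entirely
        assert!(may_contain(&kept, "A"));    // row group must still be scanned
    }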
diff --git a/query_tests/cases/in/dedup_and_predicates_parquet_ingester.expected b/query_tests/cases/in/dedup_and_predicates_parquet_ingester.expected
index b32c6d9fbb..3cb766c556 100644
--- a/query_tests/cases/in/dedup_and_predicates_parquet_ingester.expected
+++ b/query_tests/cases/in/dedup_and_predicates_parquet_ingester.expected
@@ -34,24 +34,24 @@
+-----+-----+-----+----------------------+
-- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A';
-- Results After Normalizing UUIDs
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
-| | Filter: table.tag = Dictionary(Int32, Utf8("A")) |
-| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] |
-| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: tag@2 = A |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
-| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | SortExec: [tag@2 ASC,time@3 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.tag = Dictionary(Int32, Utf8("A")) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] |
+| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: tag@2 = A |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | SortExec: [tag@2 ASC,time@3 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * FROM "table" WHERE foo=1 AND bar=2;
+-----+-----+-----+----------------------+
| bar | foo | tag | time |
@@ -87,27 +87,27 @@
+-----+-----+-----+----------------------+
-- SQL: EXPLAIN SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag;
-- Results After Normalizing UUIDs
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Sort: table.tag ASC NULLS LAST |
-| | Projection: table.bar, table.foo, table.tag, table.time |
-| | Filter: table.time = TimestampNanosecond(0, None) |
-| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.time = TimestampNanosecond(0, None)] |
-| physical_plan | SortExec: [tag@2 ASC NULLS LAST] |
-| | CoalescePartitionsExec |
-| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: time@3 = 0 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
-| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | SortExec: [tag@2 ASC,time@3 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | |
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Sort: table.tag ASC NULLS LAST |
+| | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.time = TimestampNanosecond(0, None) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.time = TimestampNanosecond(0, None)] |
+| physical_plan | SortExec: [tag@2 ASC NULLS LAST] |
+| | CoalescePartitionsExec |
+| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: time@3 = 0 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=time = TimestampNanosecond(0, None), pruning_predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | SortExec: [tag@2 ASC,time@3 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00');
+-----+-----+-----+----------------------+
| bar | foo | tag | time |
@@ -116,21 +116,21 @@
+-----+-----+-----+----------------------+
-- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00');
-- Results After Normalizing UUIDs
-+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
-| | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) |
-| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] |
-| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
-| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
-| | SortExec: [tag@2 ASC,time@3 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | |
-+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: table.bar, table.foo, table.tag, table.time |
+| | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) |
+| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] |
+| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [tag@2 ASC,time@3 ASC] |
+| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] |
+| | SortExec: [tag@2 ASC,time@3 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/query_tests/cases/in/duplicates_parquet.expected b/query_tests/cases/in/duplicates_parquet.expected
index c48378bfa3..81f7a0b091 100644
--- a/query_tests/cases/in/duplicates_parquet.expected
+++ b/query_tests/cases/in/duplicates_parquet.expected
@@ -86,13 +86,13 @@
| | ProjectionExec: expr=[area@0 as area, city@1 as city, max_temp@2 as max_temp, min_temp@3 as min_temp, state@4 as state, time@5 as time], metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] |
| | CoalesceBatchesExec: target_batch_size=4096, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] |
| | FilterExec: state@4 = MA, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] |
-| | RepartitionExec: partitioning=RoundRobinBatch(4), metrics=[fetch_time{inputPartition=0}=1.234ms, repart_time{inputPartition=0}=1.234ms, send_time{inputPartition=0}=1.234ms] |
+| | RepartitionExec: partitioning=RoundRobinBatch(4), metrics=[fetch_time=1.234ms, repart_time=1.234ms, send_time=1.234ms] |
| | UnionExec, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] |
| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC], metrics=[elapsed_compute=1.234ms, mem_used=0, num_dupes=2, output_rows=5, spill_count=0, spilled_bytes=0] |
| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC], metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=7, spill_count=0, spilled_bytes=0] |
| | UnionExec, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=7, spill_count=0, spilled_bytes=0] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=state_min@0 <= MA AND MA <= state_max@1, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=474, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=4, page_index_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=1.234ms, page_index_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=0, predicate_evaluation_errors{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=0, pushdown_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=1.234ms, pushdown_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=0, row_groups_pruned{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=state_min@0 <= MA AND MA <= state_max@1, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=632, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=3, page_index_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=1.234ms, page_index_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=0, predicate_evaluation_errors{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=0, pushdown_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=1.234ms, pushdown_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=3, row_groups_pruned{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], predicate=state_min@0 <= MA AND MA <= state_max@1, projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=591, bytes_scanned{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=628, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=5, page_index_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=1.234ms, page_index_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=1.234ms, page_index_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=0, page_index_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=0, predicate_evaluation_errors{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=0, predicate_evaluation_errors{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=0, pushdown_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=1.234ms, pushdown_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=1.234ms, pushdown_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=2, pushdown_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=3, row_groups_pruned{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=0, row_groups_pruned{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=state = Dictionary(Int32, Utf8("MA")), pruning_predicate=state_min@0 <= MA AND MA <= state_max@1, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned=474, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=4, page_index_eval_time=1.234ms, page_index_rows_filtered=0, predicate_evaluation_errors=0, pushdown_eval_time=1.234ms, pushdown_rows_filtered=0, row_groups_pruned=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=state = Dictionary(Int32, Utf8("MA")), pruning_predicate=state_min@0 <= MA AND MA <= state_max@1, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned=632, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=3, page_index_eval_time=1.234ms, page_index_rows_filtered=0, predicate_evaluation_errors=0, pushdown_eval_time=1.234ms, pushdown_rows_filtered=3, row_groups_pruned=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], predicate=state = Dictionary(Int32, Utf8("MA")), pruning_predicate=state_min@0 <= MA AND MA <= state_max@1, projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned=1219, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=5, page_index_eval_time=1.234ms, page_index_rows_filtered=0, predicate_evaluation_errors=0, pushdown_eval_time=1.234ms, pushdown_rows_filtered=5, row_groups_pruned=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] |
| | |
----------
diff --git a/query_tests/cases/in/pushdown.expected b/query_tests/cases/in/pushdown.expected
index 3c1c6001ef..2fa10056e9 100644
--- a/query_tests/cases/in/pushdown.expected
+++ b/query_tests/cases/in/pushdown.expected
@@ -37,49 +37,49 @@
+-------+--------+--------------------------------+-----------+
-- SQL: EXPLAIN SELECT * from restaurant where count > 200;
-- Results After Normalizing UUIDs
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.count > UInt64(200) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200)] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: count@0 > 200 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=count_max@0 > 200, projection=[count, system, time, town] |
-| | |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.count > UInt64(200) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200)] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: count@0 > 200 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=count > UInt64(200), pruning_predicate=count_max@0 > 200, projection=[count, system, time, town] |
+| | |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: EXPLAIN SELECT * from restaurant where count > 200.0;
-- Results After Normalizing UUIDs
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: CAST(restaurant.count AS Float64) > Float64(200) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[CAST(restaurant.count AS Float64) > Float64(200)] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: CAST(count@0 AS Float64) > 200 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=true, projection=[count, system, time, town] |
-| | |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: CAST(restaurant.count AS Float64) > Float64(200) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[CAST(restaurant.count AS Float64) > Float64(200)] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: CAST(count@0 AS Float64) > 200 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=CAST(count AS Float64) > Float64(200), projection=[count, system, time, town] |
+| | |
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: EXPLAIN SELECT * from restaurant where system > 4.0;
-- Results After Normalizing UUIDs
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.system > Float64(4) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4)] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: system@1 > 4 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system_max@0 > 4, projection=[count, system, time, town] |
-| | |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.system > Float64(4) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4)] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: system@1 > 4 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system > Float64(4), pruning_predicate=system_max@0 > 4, projection=[count, system, time, town] |
+| | |
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where count > 200 and town != 'tewsbury';
-- Results After Sorting
+-------+--------+--------------------------------+-----------+
@@ -93,19 +93,19 @@
+-------+--------+--------------------------------+-----------+
-- SQL: EXPLAIN SELECT * from restaurant where count > 200 and town != 'tewsbury';
-- Results After Normalizing UUIDs
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury"))] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: count@0 > 200 AND town@3 != tewsbury |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2, projection=[count, system, time, town] |
-| | |
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury"))] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: count@0 > 200 AND town@3 != tewsbury |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2, projection=[count, system, time, town] |
+| | |
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where count > 200 and town != 'tewsbury' and (system =5 or town = 'lawrence');
-- Results After Sorting
+-------+--------+--------------------------------+-----------+
@@ -118,19 +118,19 @@
+-------+--------+--------------------------------+-----------+
-- SQL: EXPLAIN SELECT * from restaurant where count > 200 and town != 'tewsbury' and (system =5 or town = 'lawrence');
-- Results After Normalizing UUIDs
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND (restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: count@0 > 200 AND town@3 != tewsbury AND system@1 = 5 OR town@3 = lawrence |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2, projection=[count, system, time, town] |
-| | |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND (restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: count@0 > 200 AND town@3 != tewsbury AND system@1 = 5 OR town@3 = lawrence |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")) AND (system = Float64(5) OR town = Dictionary(Int32, Utf8("lawrence"))), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2, projection=[count, system, time, town] |
+| | |
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where count > 200 and town != 'tewsbury' and (system =5 or town = 'lawrence') and count < 40000;
-- Results After Sorting
+-------+--------+--------------------------------+-----------+
@@ -142,19 +142,19 @@
+-------+--------+--------------------------------+-----------+
-- SQL: EXPLAIN SELECT * from restaurant where count > 200 and town != 'tewsbury' and (system =5 or town = 'lawrence') and count < 40000;
-- Results After Normalizing UUIDs
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND (restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))) AND restaurant.count < UInt64(40000) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence")), restaurant.count < UInt64(40000)] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: count@0 > 200 AND town@3 != tewsbury AND system@1 = 5 OR town@3 = lawrence AND count@0 < 40000 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2 AND count_min@5 < 40000, projection=[count, system, time, town] |
-| | |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND (restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))) AND restaurant.count < UInt64(40000) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence")), restaurant.count < UInt64(40000)] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: count@0 > 200 AND town@3 != tewsbury AND system@1 = 5 OR town@3 = lawrence AND count@0 < 40000 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")) AND (system = Float64(5) OR town = Dictionary(Int32, Utf8("lawrence"))) AND count < UInt64(40000), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2 AND count_min@5 < 40000, projection=[count, system, time, town] |
+| | |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where count > 200 and count < 40000;
-- Results After Sorting
+-------+--------+--------------------------------+-----------+
@@ -168,19 +168,19 @@
+-------+--------+--------------------------------+-----------+
-- SQL: EXPLAIN SELECT * from restaurant where count > 200 and count < 40000;
-- Results After Normalizing UUIDs
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.count > UInt64(200) AND restaurant.count < UInt64(40000) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.count < UInt64(40000)] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: count@0 > 200 AND count@0 < 40000 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=count_max@0 > 200 AND count_min@1 < 40000, projection=[count, system, time, town] |
-| | |
-+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.count > UInt64(200) AND restaurant.count < UInt64(40000) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.count < UInt64(40000)] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: count@0 > 200 AND count@0 < 40000 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=count > UInt64(200) AND count < UInt64(40000), pruning_predicate=count_max@0 > 200 AND count_min@1 < 40000, projection=[count, system, time, town] |
+| | |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where system > 4.0 and system < 7.0;
-- Results After Sorting
+-------+--------+--------------------------------+-----------+
@@ -195,19 +195,19 @@
+-------+--------+--------------------------------+-----------+
-- SQL: EXPLAIN SELECT * from restaurant where system > 4.0 and system < 7.0;
-- Results After Normalizing UUIDs
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.system > Float64(4) AND restaurant.system < Float64(7) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4), restaurant.system < Float64(7)] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: system@1 > 4 AND system@1 < 7 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system_max@0 > 4 AND system_min@1 < 7, projection=[count, system, time, town] |
-| | |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.system > Float64(4) AND restaurant.system < Float64(7) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4), restaurant.system < Float64(7)] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: system@1 > 4 AND system@1 < 7 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system > Float64(4) AND system < Float64(7), pruning_predicate=system_max@0 > 4 AND system_min@1 < 7, projection=[count, system, time, town] |
+| | |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where system > 5.0 and system < 7.0;
-- Results After Sorting
+-------+--------+--------------------------------+----------+
@@ -219,19 +219,19 @@
+-------+--------+--------------------------------+----------+
-- SQL: EXPLAIN SELECT * from restaurant where system > 5.0 and system < 7.0;
-- Results After Normalizing UUIDs
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.system > Float64(5) AND restaurant.system < Float64(7) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.system < Float64(7)] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: system@1 > 5 AND system@1 < 7 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system_max@0 > 5 AND system_min@1 < 7, projection=[count, system, time, town] |
-| | |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.system > Float64(5) AND restaurant.system < Float64(7) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.system < Float64(7)] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: system@1 > 5 AND system@1 < 7 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system > Float64(5) AND system < Float64(7), pruning_predicate=system_max@0 > 5 AND system_min@1 < 7, projection=[count, system, time, town] |
+| | |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where system > 5.0 and town != 'tewsbury' and 7.0 > system;
-- Results After Sorting
+-------+--------+--------------------------------+----------+
@@ -242,19 +242,19 @@
+-------+--------+--------------------------------+----------+
-- SQL: EXPLAIN SELECT * from restaurant where system > 5.0 and town != 'tewsbury' and 7.0 > system;
-- Results After Normalizing UUIDs
-+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.system > Float64(5) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND Float64(7) > restaurant.system |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), Float64(7) > restaurant.system] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: system@1 > 5 AND town@3 != tewsbury AND 7 > system@1 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7, projection=[count, system, time, town] |
-| | |
-+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.system > Float64(5) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND Float64(7) > restaurant.system |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), Float64(7) > restaurant.system] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: system@1 > 5 AND town@3 != tewsbury AND 7 > system@1 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system > Float64(5) AND town != Dictionary(Int32, Utf8("tewsbury")) AND Float64(7) > system, pruning_predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7, projection=[count, system, time, town] |
+| | |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where system > 5.0 and 'tewsbury' != town and system < 7.0 and (count = 632 or town = 'reading');
-- Results After Sorting
+-------+--------+--------------------------------+---------+
@@ -264,19 +264,19 @@
+-------+--------+--------------------------------+---------+
-- SQL: EXPLAIN SELECT * from restaurant where system > 5.0 and 'tewsbury' != town and system < 7.0 and (count = 632 or town = 'reading');
-- Results After Normalizing UUIDs
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
-| | Filter: restaurant.system > Float64(5) AND Dictionary(Int32, Utf8("tewsbury")) != restaurant.town AND restaurant.system < Float64(7) AND (restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading"))) |
-| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), Dictionary(Int32, Utf8("tewsbury")) != restaurant.town, restaurant.system < Float64(7), restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading"))] |
-| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: system@1 > 5 AND tewsbury != town@3 AND system@1 < 7 AND count@0 = 632 OR town@3 = reading |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7 AND count_min@4 <= 632 AND 632 <= count_max@5 OR town_min@1 <= reading AND reading <= town_max@2, projection=[count, system, time, town] |
-| | |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town |
+| | Filter: restaurant.system > Float64(5) AND Dictionary(Int32, Utf8("tewsbury")) != restaurant.town AND restaurant.system < Float64(7) AND (restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading"))) |
+| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), Dictionary(Int32, Utf8("tewsbury")) != restaurant.town, restaurant.system < Float64(7), restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading"))] |
+| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: system@1 > 5 AND tewsbury != town@3 AND system@1 < 7 AND count@0 = 632 OR town@3 = reading |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system > Float64(5) AND Dictionary(Int32, Utf8("tewsbury")) != town AND system < Float64(7) AND (count = UInt64(632) OR town = Dictionary(Int32, Utf8("reading"))), pruning_predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7 AND count_min@4 <= 632 AND 632 <= count_max@5 OR town_min@1 <= reading AND reading <= town_max@2, projection=[count, system, time, town] |
+| | |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: SELECT * from restaurant where 5.0 < system and town != 'tewsbury' and system < 7.0 and (count = 632 or town = 'reading') and time > to_timestamp('1970-01-01T00:00:00.000000130+00:00');
-- Results After Sorting
++
diff --git a/query_tests/cases/in/retention.expected b/query_tests/cases/in/retention.expected
index 7adfb68667..7bfbcdec45 100644
--- a/query_tests/cases/in/retention.expected
+++ b/query_tests/cases/in/retention.expected
@@ -42,30 +42,30 @@
+------+------+----------------------+
-- SQL: EXPLAIN SELECT * FROM cpu WHERE host != 'b' ORDER BY host,time;
-- Results After Normalizing UUIDs
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Sort: cpu.host ASC NULLS LAST, cpu.time ASC NULLS LAST |
-| | Projection: cpu.host, cpu.load, cpu.time |
-| | Filter: cpu.host != Dictionary(Int32, Utf8("b")) |
-| | TableScan: cpu projection=[host, load, time], partial_filters=[cpu.host != Dictionary(Int32, Utf8("b"))] |
-| physical_plan | SortExec: [host@0 ASC NULLS LAST,time@2 ASC NULLS LAST] |
-| | CoalescePartitionsExec |
-| | ProjectionExec: expr=[host@0 as host, load@1 as load, time@2 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: host@0 != b |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | DeduplicateExec: [host@0 ASC,time@2 ASC] |
-| | SortPreservingMergeExec: [host@0 ASC,time@2 ASC] |
-| | SortExec: [host@0 ASC,time@2 ASC] |
-| | UnionExec |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=host_min@0 != b OR b != host_max@1, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=host_min@0 != b OR b != host_max@1, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] |
-| | |
-+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Sort: cpu.host ASC NULLS LAST, cpu.time ASC NULLS LAST |
+| | Projection: cpu.host, cpu.load, cpu.time |
+| | Filter: cpu.host != Dictionary(Int32, Utf8("b")) |
+| | TableScan: cpu projection=[host, load, time], partial_filters=[cpu.host != Dictionary(Int32, Utf8("b"))] |
+| physical_plan | SortExec: [host@0 ASC NULLS LAST,time@2 ASC NULLS LAST] |
+| | CoalescePartitionsExec |
+| | ProjectionExec: expr=[host@0 as host, load@1 as load, time@2 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: host@0 != b |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | DeduplicateExec: [host@0 ASC,time@2 ASC] |
+| | SortPreservingMergeExec: [host@0 ASC,time@2 ASC] |
+| | SortExec: [host@0 ASC,time@2 ASC] |
+| | UnionExec |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=host != Dictionary(Int32, Utf8("b")), pruning_predicate=host_min@0 != b OR b != host_max@1, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=host != Dictionary(Int32, Utf8("b")), pruning_predicate=host_min@0 != b OR b != host_max@1, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] |
+| | |
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/query_tests/cases/in/several_chunks.expected b/query_tests/cases/in/several_chunks.expected
index 808413a60b..b1f8640a5a 100644
--- a/query_tests/cases/in/several_chunks.expected
+++ b/query_tests/cases/in/several_chunks.expected
@@ -73,25 +73,25 @@
+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-- SQL: EXPLAIN SELECT * from h2o where time >= to_timestamp('1970-01-01T00:00:00.000000250+00:00');
-- Results After Normalizing UUIDs
-+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| plan_type | plan |
-+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time |
-| | Filter: h2o.time >= TimestampNanosecond(250, None) |
-| | TableScan: h2o projection=[city, other_temp, state, temp, time], partial_filters=[h2o.time >= TimestampNanosecond(250, None)] |
-| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] |
-| | CoalesceBatchesExec: target_batch_size=4096 |
-| | FilterExec: time@4 >= 250 |
-| | RepartitionExec: partitioning=RoundRobinBatch(4) |
-| | UnionExec |
-| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | UnionExec |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=time_max@0 >= 250, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=time_max@0 >= 250, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
-| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
-| | RecordBatchesExec: batches_groups=1 batches=1 |
-| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], predicate=time_max@0 >= 250, projection=[city, other_temp, state, temp, time] |
-| | |
-+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time |
+| | Filter: h2o.time >= TimestampNanosecond(250, None) |
+| | TableScan: h2o projection=[city, other_temp, state, temp, time], partial_filters=[h2o.time >= TimestampNanosecond(250, None)] |
+| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] |
+| | CoalesceBatchesExec: target_batch_size=4096 |
+| | FilterExec: time@4 >= 250 |
+| | RepartitionExec: partitioning=RoundRobinBatch(4) |
+| | UnionExec |
+| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | UnionExec |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=time >= TimestampNanosecond(250, None), pruning_predicate=time_max@0 >= 250, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=time >= TimestampNanosecond(250, None), pruning_predicate=time_max@0 >= 250, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] |
+| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] |
+| | RecordBatchesExec: batches_groups=1 batches=1 |
+| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], predicate=time >= TimestampNanosecond(250, None), pruning_predicate=time_max@0 >= 250, projection=[city, other_temp, state, temp, time] |
+| | |
++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index 1030140964..e08575a8db 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -16,8 +16,7 @@ license.workspace = true
### BEGIN HAKARI SECTION
[dependencies]
-ahash-ca01ad9e24f5d932 = { package = "ahash", version = "0.7", features = ["std"] }
-ahash-c38e5c1d305a1b54 = { package = "ahash", version = "0.8", default-features = false, features = ["compile-time-rng", "const-random", "getrandom", "runtime-rng"] }
+ahash = { version = "0.8", default-features = false, features = ["compile-time-rng", "const-random", "getrandom", "runtime-rng"] }
arrow = { version = "27", features = ["arrow-csv", "arrow-ipc", "arrow-json", "comfy-table", "csv", "dyn_cmp_dict", "ipc", "json", "prettyprint"] }
base64 = { version = "0.13", features = ["std"] }
bitflags = { version = "1" }
@@ -26,11 +25,12 @@ bytes = { version = "1", features = ["std"] }
chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "iana-time-zone", "serde", "std", "winapi"] }
crossbeam-utils = { version = "0.8", features = ["std"] }
crypto-common = { version = "0.1", default-features = false, features = ["std"] }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "ebb24c5bf46f2af362aebffba2012875b328e799", features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "a61615b2949bea9027eefe686613605e135780f2", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] }
digest = { version = "0.10", features = ["alloc", "block-buffer", "core-api", "mac", "std", "subtle"] }
either = { version = "1", features = ["use_std"] }
fixedbitset = { version = "0.4", features = ["std"] }
flatbuffers = { version = "22", features = ["thiserror"] }
+flate2 = { version = "1", features = ["miniz_oxide", "rust_backend"] }
futures-channel = { version = "0.3", features = ["alloc", "futures-sink", "sink", "std"] }
futures-core = { version = "0.3", features = ["alloc", "std"] }
futures-io = { version = "0.3", features = ["std"] }
@@ -39,7 +39,7 @@ futures-task = { version = "0.3", default-features = false, features = ["alloc",
futures-util = { version = "0.3", features = ["alloc", "async-await", "async-await-macro", "channel", "futures-channel", "futures-io", "futures-macro", "futures-sink", "io", "memchr", "sink", "slab", "std"] }
getrandom = { version = "0.2", default-features = false, features = ["std"] }
hashbrown-5ef9efb8ec2df382 = { package = "hashbrown", version = "0.12", features = ["ahash", "inline-more", "raw"] }
-hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13", features = ["ahash", "inline-more"] }
+hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13", features = ["ahash", "inline-more", "raw"] }
indexmap = { version = "1", default-features = false, features = ["std"] }
libc = { version = "0.2", features = ["extra_traits", "std"] }
lock_api = { version = "0.4", default-features = false, features = ["arc_lock"] }
@@ -87,7 +87,6 @@ zstd-safe = { version = "5", default-features = false, features = ["arrays", "le
zstd-sys = { version = "2", default-features = false, features = ["legacy", "std", "zdict_builder"] }
[build-dependencies]
-ahash-ca01ad9e24f5d932 = { package = "ahash", version = "0.7", features = ["std"] }
base64 = { version = "0.13", features = ["std"] }
bitflags = { version = "1" }
byteorder = { version = "1", features = ["std"] }
|
5ca7bd58f43b40a6aee1eba69f7ac0b0bbc9b202
|
Fraser Savage
|
2023-06-15 16:27:23
|
Implement `From<DmlOperation>` for `IngestOp`
|
This commit implements the `From` trait to allow quick conversion from
`DmlOperation::DmlWrite` to `IngestOp::WriteOperation`. This conversion
performs some copies and should be removed once the RPC write path has
been switched to use `IngestOp`.
| null |
refactor(ingester): Implement `From<DmlOperation>` for `IngestOp`
This commit implements the `From` trait to allow quick conversion from
`DmlOperation::DmlWrite` to `IngestOp::WriteOperation`. This conversion
performs some copies and should be removed once the RPC write path has
been switched to use `IngestOp`.
|
diff --git a/ingester/src/dml_payload/ingest_op.rs b/ingester/src/dml_payload/ingest_op.rs
index 0fdce56110..a7086c3998 100644
--- a/ingester/src/dml_payload/ingest_op.rs
+++ b/ingester/src/dml_payload/ingest_op.rs
@@ -1,4 +1,5 @@
use data_types::{NamespaceId, PartitionKey, SequenceNumber, TableId};
+use dml::{DmlOperation, DmlWrite};
use hashbrown::HashMap;
use mutable_batch::MutableBatch;
use trace::ctx::SpanContext;
@@ -9,6 +10,17 @@ pub enum IngestOp {
Write(WriteOperation),
}
+impl From<DmlOperation> for IngestOp {
+ fn from(value: DmlOperation) -> Self {
+ match value {
+ DmlOperation::Write(w) => Self::Write(WriteOperation::from(w)),
+ DmlOperation::Delete(_) => {
+ panic!("no corresponding ingest operation exists for DML delete")
+ }
+ }
+ }
+}
+
/// A decoded representation of the data contained by an RPC write
/// represented by an [`IngestOp::Write`]
pub struct WriteOperation {
@@ -45,6 +57,42 @@ impl WriteOperation {
}
}
+// TODO(savage): Temporary [`From`] implementation to assist in switchover
+// within ingester code. This should be removed in favour of constructing all
+// [`WriteOperation`]s directly
+impl From<DmlWrite> for WriteOperation {
+ fn from(dml_write: DmlWrite) -> Self {
+ let namespace_id = dml_write.namespace_id();
+ let partition_key = dml_write.partition_key().to_owned();
+ let sequence_number = dml_write
+ .meta()
+ .sequence()
+ .expect("tried to create write operation from unsequenced DML write");
+ let span_context = dml_write.meta().span_context().map(SpanContext::to_owned);
+
+ Self::new(
+ namespace_id,
+ dml_write
+ .into_tables()
+ .map(|(table, data)| {
+ (
+ table,
+ TableData {
+ table,
+ partitioned_data: PartitionedData {
+ sequence_number,
+ data,
+ },
+ },
+ )
+ })
+ .collect(),
+ partition_key,
+ span_context,
+ )
+ }
+}
+
/// A container for all data for an individual table as part of a write
/// operation
pub struct TableData {
@@ -55,9 +103,27 @@ pub struct TableData {
partitioned_data: PartitionedData,
}
+impl TableData {
+ pub fn new(table: TableId, partitioned_data: PartitionedData) -> Self {
+ Self {
+ table,
+ partitioned_data,
+ }
+ }
+}
+
/// Partitioned data belonging to a write, sequenced individually from
/// other [`PartitionedData`]
pub struct PartitionedData {
sequence_number: SequenceNumber,
data: MutableBatch,
}
+
+impl PartitionedData {
+ pub fn new(sequence_number: SequenceNumber, data: MutableBatch) -> Self {
+ Self {
+ sequence_number,
+ data,
+ }
+ }
+}
|
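The conversion above follows a standard `From` shape: write operations map one-to-one onto ingest operations, while deletes have no ingest-side counterpart and panic. Below is a minimal, self-contained sketch of that shape; `WritePayload`, `DmlOperation` and `IngestOp` here are simplified stand-ins, not the real types from the `dml` and `ingester` crates.

// Simplified stand-in types; the real DmlOperation/IngestOp carry namespace,
// partition key, sequence numbers and per-table mutable batches.
#[derive(Debug)]
struct WritePayload {
    namespace_id: i64,
    data: String,
}

#[derive(Debug)]
enum DmlOperation {
    Write(WritePayload),
    Delete(String),
}

#[derive(Debug)]
enum IngestOp {
    Write(WritePayload),
}

// Same shape as the commit above: writes convert, deletes have no
// corresponding ingest operation and therefore panic.
impl From<DmlOperation> for IngestOp {
    fn from(value: DmlOperation) -> Self {
        match value {
            DmlOperation::Write(w) => Self::Write(w),
            DmlOperation::Delete(_) => {
                panic!("no corresponding ingest operation exists for DML delete")
            }
        }
    }
}

fn main() {
    let dml = DmlOperation::Write(WritePayload {
        namespace_id: 42,
        data: "cpu,host=a usage=0.5".to_string(),
    });
    let op: IngestOp = dml.into();
    println!("{op:?}");
}

Because the real conversion also panics on unsequenced writes, it is only a transitional shim, which is why the commit flags it for removal once the RPC write path constructs `IngestOp` values directly.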
e7511c0f33256e432d4c902431f87e59c111b8b2
|
Marco Neumann
|
2023-04-17 13:28:01
|
basic InfluxQL `SHOW MEASUREMENTS` (#7504)
|
No WHERE-clauses that result in actual data checks yet, just pure metadata
queries.
Ref https://github.com/influxdata/idpe/issues/17358 .
| null |
feat: basic InfluxQL `SHOW MEASUREMENTS` (#7504)
No WHERE-clauses that result in actual data checks yet, just pure metadata
queries.
Ref https://github.com/influxdata/idpe/issues/17358 .
|
diff --git a/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql b/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql
index 6caaa4c846..6e1f7df1ce 100644
--- a/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql
+++ b/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql
@@ -1,5 +1,27 @@
-- IOX_SETUP: InfluxQLSelectSupport
+-- SHOW MEASUREMENTS
+SHOW MEASUREMENTS;
+SHOW MEASUREMENTS LIMIT 2;
+SHOW MEASUREMENTS OFFSET 1;
+SHOW MEASUREMENTS LIMIT 1 OFFSET 2;
+SHOW MEASUREMENTS WITH MEASUREMENT =~ /m.*/;
+SHOW MEASUREMENTS WITH MEASUREMENT =~ /d\isk/;
+SHOW MEASUREMENTS WITH MEASUREMENT = disk;
+SHOW MEASUREMENTS WITH MEASUREMENT = does_not_exist;
+
+-- invalid queries for `SHOW MEASUREMENTS`
+SHOW MEASUREMENTS WITH MEASUREMENT = /my_db/;
+SHOW MEASUREMENTS WITH MEASUREMENT =~ my_db;
+
+-- unimplemented features in `SHOW MEASUREMENTS`
+SHOW MEASUREMENTS ON my_db;
+SHOW MEASUREMENTS WITH MEASUREMENT = x.my_db;
+SHOW MEASUREMENTS WITH MEASUREMENT = x.y.my_db;
+SHOW MEASUREMENTS WITH MEASUREMENT =~ x./my_db/;
+SHOW MEASUREMENTS WITH MEASUREMENT =~ x.y./my_db/;
+SHOW MEASUREMENTS WHERE "tag0" = 'x';
+
-- SHOW FIELD KEYS
SHOW FIELD KEYS;
SHOW FIELD KEYS LIMIT 2;
diff --git a/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql.expected b/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql.expected
index 0ac191d827..f278289215 100644
--- a/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql.expected
+++ b/influxdb_iox/tests/query_tests2/cases/in/influxql_metadata.influxql.expected
@@ -1,4 +1,86 @@
-- Test Setup: InfluxQLSelectSupport
+-- InfluxQL: SHOW MEASUREMENTS;
+name: measurements
++------+
+| name |
++------+
+| cpu |
+| disk |
+| m0 |
+| m1 |
+| m2 |
+| m3 |
+| m4 |
++------+
+-- InfluxQL: SHOW MEASUREMENTS LIMIT 2;
+name: measurements
++------+
+| name |
++------+
+| cpu |
+| disk |
++------+
+-- InfluxQL: SHOW MEASUREMENTS OFFSET 1;
+name: measurements
++------+
+| name |
++------+
+| disk |
+| m0 |
+| m1 |
+| m2 |
+| m3 |
+| m4 |
++------+
+-- InfluxQL: SHOW MEASUREMENTS LIMIT 1 OFFSET 2;
+name: measurements
++------+
+| name |
++------+
+| m0 |
++------+
+-- InfluxQL: SHOW MEASUREMENTS WITH MEASUREMENT =~ /m.*/;
+name: measurements
++------+
+| name |
++------+
+| m0 |
+| m1 |
+| m2 |
+| m3 |
+| m4 |
++------+
+-- InfluxQL: SHOW MEASUREMENTS WITH MEASUREMENT =~ /d\isk/;
+name: measurements
++------+
+| name |
++------+
+| disk |
++------+
+-- InfluxQL: SHOW MEASUREMENTS WITH MEASUREMENT = disk;
+name: measurements
++------+
+| name |
++------+
+| disk |
++------+
+-- InfluxQL: SHOW MEASUREMENTS WITH MEASUREMENT = does_not_exist;
+-- InfluxQL: SHOW MEASUREMENTS WITH MEASUREMENT = /my_db/;
+Error while planning query: Error during planning: expected string but got regex
+-- InfluxQL: SHOW MEASUREMENTS WITH MEASUREMENT =~ my_db;
+Error while planning query: Error during planning: expected regex but got string
+-- InfluxQL: SHOW MEASUREMENTS ON my_db;
+Error while planning query: This feature is not implemented: SHOW MEASUREMENTS ON <database>
+-- InfluxQL: SHOW MEASUREMENTS WITH MEASUREMENT = x.my_db;
+Error while planning query: This feature is not implemented: retention policy in from clause
+-- InfluxQL: SHOW MEASUREMENTS WITH MEASUREMENT = x.y.my_db;
+Error while planning query: This feature is not implemented: database name in from clause
+-- InfluxQL: SHOW MEASUREMENTS WITH MEASUREMENT =~ x./my_db/;
+Error while planning query: This feature is not implemented: retention policy in from clause
+-- InfluxQL: SHOW MEASUREMENTS WITH MEASUREMENT =~ x.y./my_db/;
+Error while planning query: This feature is not implemented: database name in from clause
+-- InfluxQL: SHOW MEASUREMENTS WHERE "tag0" = 'x';
+Error while planning query: This feature is not implemented: SHOW MEASUREMENTS WHERE <condition>
-- InfluxQL: SHOW FIELD KEYS;
name: cpu
+--------------+-----------+
diff --git a/iox_query_influxql/src/frontend/planner.rs b/iox_query_influxql/src/frontend/planner.rs
index ce6a63d444..ffae5ed5d0 100644
--- a/iox_query_influxql/src/frontend/planner.rs
+++ b/iox_query_influxql/src/frontend/planner.rs
@@ -1,5 +1,6 @@
use arrow::datatypes::SchemaRef;
use influxdb_influxql_parser::show_field_keys::ShowFieldKeysStatement;
+use influxdb_influxql_parser::show_measurements::ShowMeasurementsStatement;
use influxdb_influxql_parser::show_tag_values::ShowTagValuesStatement;
use std::any::Any;
use std::collections::{HashMap, HashSet};
@@ -249,6 +250,17 @@ fn find_all_measurements(stmt: &Statement, tables: &[String]) -> Result<HashSet<
Ok(self)
}
+ fn post_visit_show_measurements_statement(
+ self,
+ sm: &ShowMeasurementsStatement,
+ ) -> Result<Self, Self::Error> {
+ if sm.with_measurement.is_none() {
+ self.0.extend(self.1.iter().cloned());
+ }
+
+ Ok(self)
+ }
+
fn post_visit_show_field_keys_statement(
self,
sfk: &ShowFieldKeysStatement,
@@ -328,6 +340,17 @@ mod test {
vec!["bar", "foo", "foobar"]
);
+ // Find all measurements in `SHOW MEASUREMENTS`
+ assert_eq!(find("SHOW MEASUREMENTS"), vec!["bar", "foo", "foobar"]);
+ assert_eq!(
+ find("SHOW MEASUREMENTS WITH MEASUREMENT = foo"),
+ vec!["foo"]
+ );
+ assert_eq!(
+ find("SHOW MEASUREMENTS WITH MEASUREMENT =~ /^foo/"),
+ vec!["foo", "foobar"]
+ );
+
// Find all measurements in `SHOW FIELD KEYS`
assert_eq!(find("SHOW FIELD KEYS"), vec!["bar", "foo", "foobar"]);
assert_eq!(find("SHOW FIELD KEYS FROM /^foo/"), vec!["foo", "foobar"]);
diff --git a/iox_query_influxql/src/plan/planner.rs b/iox_query_influxql/src/plan/planner.rs
index 5f6f2ff192..aea5be3279 100644
--- a/iox_query_influxql/src/plan/planner.rs
+++ b/iox_query_influxql/src/plan/planner.rs
@@ -11,8 +11,8 @@ use crate::plan::planner_time_range_expression::{
use crate::plan::rewriter::rewrite_statement;
use crate::plan::util::{binary_operator_to_df_operator, rebase_expr, Schemas};
use crate::plan::var_ref::{column_type_to_var_ref_data_type, var_ref_data_type_to_data_type};
-use arrow::array::StringBuilder;
-use arrow::datatypes::{DataType, Field as ArrowField, Schema as ArrowSchema};
+use arrow::array::{StringBuilder, StringDictionaryBuilder};
+use arrow::datatypes::{DataType, Field as ArrowField, Int32Type, Schema as ArrowSchema};
use arrow::record_batch::RecordBatch;
use chrono_tz::Tz;
use datafusion::catalog::TableReference;
@@ -44,6 +44,9 @@ use influxdb_influxql_parser::select::{
FillClause, GroupByClause, SLimitClause, SOffsetClause, TimeZoneClause,
};
use influxdb_influxql_parser::show_field_keys::ShowFieldKeysStatement;
+use influxdb_influxql_parser::show_measurements::{
+ ShowMeasurementsStatement, WithMeasurementClause,
+};
use influxdb_influxql_parser::show_tag_values::{ShowTagValuesStatement, WithKeyClause};
use influxdb_influxql_parser::simple_from_clause::ShowFromClause;
use influxdb_influxql_parser::{
@@ -194,8 +197,8 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
Statement::ShowDatabases(_) => {
Err(DataFusionError::NotImplemented("SHOW DATABASES".into()))
}
- Statement::ShowMeasurements(_) => {
- Err(DataFusionError::NotImplemented("SHOW MEASUREMENTS".into()))
+ Statement::ShowMeasurements(show_measurements) => {
+ self.show_measurements_to_plan(*show_measurements)
}
Statement::ShowRetentionPolicies(_) => Err(DataFusionError::NotImplemented(
"SHOW RETENTION POLICIES".into(),
@@ -1341,7 +1344,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
vec![Expr::Column(Column::new_unqualified(field_key_col)).sort(true, false)],
true,
&[],
- &[field_key_col],
+ &[],
)?;
Ok(plan)
@@ -1422,7 +1425,6 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
schema: output_schema.to_dfschema_ref()?,
}),
};
-
let plan = plan_with_metadata(
plan,
&InfluxQlMetadata {
@@ -1440,7 +1442,138 @@ impl<'a> InfluxQLToLogicalPlan<'a> {
],
true,
&[],
- &[key_col, value_col],
+ &[],
+ )?;
+
+ Ok(plan)
+ }
+
+ fn show_measurements_to_plan(
+ &self,
+ show_measurements: ShowMeasurementsStatement,
+ ) -> Result<LogicalPlan> {
+ if show_measurements.on.is_some() {
+ // How do we handle this? Do we need to perform cross-namespace queries here?
+ return Err(DataFusionError::NotImplemented(
+ "SHOW MEASUREMENTS ON <database>".into(),
+ ));
+ }
+ if show_measurements.condition.is_some() {
+ return Err(DataFusionError::NotImplemented(
+ "SHOW MEASUREMENTS WHERE <condition>".into(),
+ ));
+ }
+
+ let tables = match show_measurements.with_measurement {
+ Some(
+ WithMeasurementClause::Equals(qualified_name)
+ | WithMeasurementClause::Regex(qualified_name),
+ ) if qualified_name.database.is_some() => {
+ return Err(DataFusionError::NotImplemented(
+ "database name in from clause".into(),
+ ));
+ }
+ Some(
+ WithMeasurementClause::Equals(qualified_name)
+ | WithMeasurementClause::Regex(qualified_name),
+ ) if qualified_name.retention_policy.is_some() => {
+ return Err(DataFusionError::NotImplemented(
+ "retention policy in from clause".into(),
+ ));
+ }
+ Some(WithMeasurementClause::Equals(qualified_name)) => match qualified_name.name {
+ MeasurementName::Name(n) => {
+ let names = self.s.table_names();
+ if names.into_iter().any(|table| table == n.as_str()) {
+ vec![n.as_str().to_owned()]
+ } else {
+ vec![]
+ }
+ }
+ MeasurementName::Regex(_) => {
+ return Err(DataFusionError::Plan(String::from(
+ "expected string but got regex",
+ )));
+ }
+ },
+ Some(WithMeasurementClause::Regex(qualified_name)) => match &qualified_name.name {
+ MeasurementName::Name(_) => {
+ return Err(DataFusionError::Plan(String::from(
+ "expected regex but got string",
+ )));
+ }
+ MeasurementName::Regex(regex) => {
+ let regex = parse_regex(regex)?;
+ let mut tables = self
+ .s
+ .table_names()
+ .into_iter()
+ .filter(|s| regex.is_match(s))
+ .map(|s| s.to_owned())
+ .collect::<Vec<_>>();
+ tables.sort();
+ tables
+ }
+ },
+ None => {
+ let mut tables = self
+ .s
+ .table_names()
+ .into_iter()
+ .map(|s| s.to_owned())
+ .collect::<Vec<_>>();
+ tables.sort();
+ tables
+ }
+ };
+
+ let name_col = "name";
+ let output_schema = Arc::new(ArrowSchema::new(vec![
+ ArrowField::new(
+ INFLUXQL_MEASUREMENT_COLUMN_NAME,
+ (&InfluxColumnType::Tag).into(),
+ false,
+ ),
+ ArrowField::new(name_col, (&InfluxColumnType::Tag).into(), false),
+ ]));
+
+ let mut dummy_measurement_names_builder = StringDictionaryBuilder::<Int32Type>::new();
+ let mut name_builder = StringDictionaryBuilder::<Int32Type>::new();
+ for table in tables {
+ dummy_measurement_names_builder.append_value("measurements");
+ name_builder.append_value(table);
+ }
+ let plan = LogicalPlanBuilder::scan(
+ "measurements",
+ provider_as_source(Arc::new(MemTable::try_new(
+ Arc::clone(&output_schema),
+ vec![vec![RecordBatch::try_new(
+ Arc::clone(&output_schema),
+ vec![
+ Arc::new(dummy_measurement_names_builder.finish()),
+ Arc::new(name_builder.finish()),
+ ],
+ )?]],
+ )?)),
+ None,
+ )?
+ .build()?;
+
+ let plan = plan_with_metadata(
+ plan,
+ &InfluxQlMetadata {
+ measurement_column_index: MEASUREMENT_COLUMN_INDEX,
+ tag_key_columns: vec![],
+ },
+ )?;
+ let plan = self.limit(
+ plan,
+ show_measurements.offset,
+ show_measurements.limit,
+ vec![Expr::Column(Column::new_unqualified(name_col)).sort(true, false)],
+ true,
+ &[],
+ &[],
)?;
Ok(plan)
@@ -1985,7 +2118,6 @@ mod test {
assert_snapshot!(plan("DELETE FROM foo"), @"This feature is not implemented: DELETE");
assert_snapshot!(plan("DROP MEASUREMENT foo"), @"This feature is not implemented: DROP MEASUREMENT");
assert_snapshot!(plan("SHOW DATABASES"), @"This feature is not implemented: SHOW DATABASES");
- assert_snapshot!(plan("SHOW MEASUREMENTS"), @"This feature is not implemented: SHOW MEASUREMENTS");
assert_snapshot!(plan("SHOW RETENTION POLICIES"), @"This feature is not implemented: SHOW RETENTION POLICIES");
assert_snapshot!(plan("SHOW TAG KEYS"), @"This feature is not implemented: SHOW TAG KEYS");
}
@@ -1997,7 +2129,7 @@ mod test {
fn test_show_field_keys() {
assert_snapshot!(plan("SHOW FIELD KEYS"), @"TableScan: field_keys [iox::measurement:Utf8, fieldKey:Utf8, fieldType:Utf8]");
assert_snapshot!(plan("SHOW FIELD KEYS LIMIT 1 OFFSET 2"), @r###"
- Sort: field_keys.iox::measurement ASC NULLS LAST, field_keys.fieldKey ASC NULLS LAST, field_keys.fieldKey ASC NULLS LAST [iox::measurement:Utf8, fieldKey:Utf8, fieldType:Utf8]
+ Sort: field_keys.iox::measurement ASC NULLS LAST, field_keys.fieldKey ASC NULLS LAST [iox::measurement:Utf8, fieldKey:Utf8, fieldType:Utf8]
Projection: field_keys.iox::measurement, field_keys.fieldKey, field_keys.fieldType [iox::measurement:Utf8, fieldKey:Utf8, fieldType:Utf8]
Filter: iox::row BETWEEN Int64(3) AND Int64(3) [iox::measurement:Utf8, fieldKey:Utf8, fieldType:Utf8, iox::row:UInt64;N]
WindowAggr: windowExpr=[[ROW_NUMBER() PARTITION BY [field_keys.iox::measurement] ORDER BY [field_keys.fieldKey ASC NULLS LAST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW AS iox::row]] [iox::measurement:Utf8, fieldKey:Utf8, fieldType:Utf8, iox::row:UInt64;N]
@@ -2005,6 +2137,18 @@ mod test {
"###);
}
+ #[test]
+ fn test_snow_measurements() {
+ assert_snapshot!(plan("SHOW MEASUREMENTS"), @"TableScan: measurements [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)]");
+ assert_snapshot!(plan("SHOW MEASUREMENTS LIMIT 1 OFFSET 2"), @r###"
+ Sort: measurements.iox::measurement ASC NULLS LAST, measurements.name ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)]
+ Projection: measurements.iox::measurement, measurements.name [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)]
+ Filter: iox::row BETWEEN Int64(3) AND Int64(3) [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8), iox::row:UInt64;N]
+ WindowAggr: windowExpr=[[ROW_NUMBER() PARTITION BY [measurements.iox::measurement] ORDER BY [measurements.name ASC NULLS LAST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW AS iox::row]] [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8), iox::row:UInt64;N]
+ TableScan: measurements [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)]
+ "###);
+ }
+
#[test]
fn test_show_tag_values() {
assert_snapshot!(plan("SHOW TAG VALUES WITH KEY = bar"), @r###"
@@ -2015,7 +2159,7 @@ mod test {
TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
"###);
assert_snapshot!(plan("SHOW TAG VALUES WITH KEY = bar LIMIT 1 OFFSET 2"), @r###"
- Sort: iox::measurement ASC NULLS LAST, key ASC NULLS LAST, value ASC NULLS LAST, key ASC NULLS LAST, value ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), key:Dictionary(Int32, Utf8), value:Dictionary(Int32, Utf8);N]
+ Sort: iox::measurement ASC NULLS LAST, key ASC NULLS LAST, value ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), key:Dictionary(Int32, Utf8), value:Dictionary(Int32, Utf8);N]
Projection: iox::measurement, key, value [iox::measurement:Dictionary(Int32, Utf8), key:Dictionary(Int32, Utf8), value:Dictionary(Int32, Utf8);N]
Filter: iox::row BETWEEN Int64(3) AND Int64(3) [iox::measurement:Dictionary(Int32, Utf8), key:Dictionary(Int32, Utf8), value:Dictionary(Int32, Utf8);N, iox::row:UInt64;N]
WindowAggr: windowExpr=[[ROW_NUMBER() PARTITION BY [iox::measurement] ORDER BY [key ASC NULLS LAST, value ASC NULLS LAST] ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW AS iox::row]] [iox::measurement:Dictionary(Int32, Utf8), key:Dictionary(Int32, Utf8), value:Dictionary(Int32, Utf8);N, iox::row:UInt64;N]
|
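The `SHOW MEASUREMENTS` planner above resolves the optional `WITH MEASUREMENT` clause purely against the schema's table names, sorts the survivors, and materialises them as an in-memory `measurements` table. A minimal sketch of that name-filtering step follows, with simplified types: a plain closure stands in for the compiled regex and a small enum stands in for the parser's `WithMeasurementClause`.

// Simplified stand-in for WithMeasurementClause; the real planner compiles
// the regex from the InfluxQL AST instead of taking a closure.
enum MeasurementFilter {
    Equals(String),
    Matches(Box<dyn Fn(&str) -> bool>),
}

// Returns the sorted measurement names that survive the optional filter,
// mirroring what the planner feeds into the in-memory "measurements" table.
fn filter_measurements(all: &[&str], filter: Option<MeasurementFilter>) -> Vec<String> {
    let keep = |t: &str| match &filter {
        Some(MeasurementFilter::Equals(name)) => t == name.as_str(),
        Some(MeasurementFilter::Matches(pred)) => pred(t),
        None => true,
    };
    let mut out: Vec<String> = all
        .iter()
        .copied()
        .filter(|&t| keep(t))
        .map(str::to_string)
        .collect();
    out.sort();
    out
}

fn main() {
    let tables = ["m1", "disk", "cpu", "m0"];

    // SHOW MEASUREMENTS
    assert_eq!(filter_measurements(&tables, None), ["cpu", "disk", "m0", "m1"]);

    // SHOW MEASUREMENTS WITH MEASUREMENT = disk
    assert_eq!(
        filter_measurements(&tables, Some(MeasurementFilter::Equals("disk".into()))),
        ["disk"]
    );

    // SHOW MEASUREMENTS WITH MEASUREMENT =~ /m.*/ (regex stubbed as a closure)
    let regex_like = MeasurementFilter::Matches(Box::new(|t: &str| t.starts_with('m')));
    assert_eq!(filter_measurements(&tables, Some(regex_like)), ["m0", "m1"]);
}

The result is just a two-column record batch (the dictionary-encoded `iox::measurement` tag plus `name`), so LIMIT and OFFSET are applied afterwards with the same window-function machinery already used for `SHOW FIELD KEYS`.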
8bb631e86b6e359f08060a9def4c7b3f489cf35e
|
Dom Dwyer
|
2023-06-14 16:05:35
|
fix revert conflicts
|
This fixes the non-compiling revert code.
| null |
refactor: fix revert conflicts
This fixes the non-compiling revert code.
|
diff --git a/mutable_batch/src/payload/partition.rs b/mutable_batch/src/payload/partition.rs
index 8652b01ee9..7e59838975 100644
--- a/mutable_batch/src/payload/partition.rs
+++ b/mutable_batch/src/payload/partition.rs
@@ -91,13 +91,9 @@ impl<'a> Template<'a> {
) -> Result<(), PartitionKeyError> {
match self {
Template::TagValue(col) if col.valid.get(idx) => match &col.data {
- ColumnData::Tag(col_data, dictionary, _) => out.write_str(never_empty(
- Cow::from(utf8_percent_encode(
- dictionary.lookup_id(col_data[idx]).unwrap(),
- &ENCODED_PARTITION_KEY_CHARS,
- ))
- .as_ref(),
- ))?,
+ ColumnData::Tag(col_data, dictionary, _) => out.write_str(
+ encode_key_part(dictionary.lookup_id(col_data[idx]).unwrap()).as_ref(),
+ )?,
_ => return Err(PartitionKeyError::TagValueNotTag(col.influx_type())),
},
Template::TimeFormat(t, fmt) => fmt.render(t[idx], out)?,
|
b32662ebf255f5e403ed73f75d2197978fb9e1c0
|
Dom Dwyer
|
2023-01-24 13:38:36
|
router balancer recovery
|
Ensure a recovering node is yielded from the balancer.
| null |
test: router balancer recovery
Ensure a recovering node is yielded from the balancer.
|
diff --git a/router/src/dml_handlers/rpc_write/balancer.rs b/router/src/dml_handlers/rpc_write/balancer.rs
index f421c8fafd..1ad1fc4d57 100644
--- a/router/src/dml_handlers/rpc_write/balancer.rs
+++ b/router/src/dml_handlers/rpc_write/balancer.rs
@@ -177,6 +177,45 @@ mod tests {
assert_eq!(circuit_err_2.err_count(), 0);
}
+ /// An unhealthy node that recovers is yielded to the caller.
+ #[tokio::test]
+ async fn test_balancer_upstream_recovery() {
+ const BALANCER_CALLS: usize = 10;
+
+ // Initialise 3 RPC clients and configure their mock circuit breakers;
+ // two returns a unhealthy state, one is healthy.
+ let circuit = Arc::new(MockCircuitBreaker::default());
+ circuit.set_usable(false);
+ let client = CircuitBreakingClient::new(Arc::new(MockWriteClient::default()))
+ .with_circuit_breaker(Arc::clone(&circuit));
+
+ assert_eq!(circuit.ok_count(), 0);
+
+ let balancer = Balancer::new([client]);
+
+ let mut endpoints = balancer.endpoints();
+ assert_matches!(endpoints.next(), None);
+ assert_eq!(circuit.is_usable_count(), 1);
+
+ circuit.set_usable(true);
+
+ let mut endpoints = balancer.endpoints();
+ assert_matches!(endpoints.next(), Some(_));
+ assert_eq!(circuit.is_usable_count(), 2);
+
+ // The now-healthy client is constantly yielded.
+ const N: usize = 3;
+ for _ in 0..N {
+ endpoints
+ .next()
+ .expect("should yield healthy client")
+ .write(WriteRequest::default())
+ .await
+ .expect("should succeed");
+ }
+ assert_eq!(circuit.ok_count(), N);
+ }
+
// Ensure the balancer round-robins across all healthy clients.
//
// Note this is a property test that asserts the even distribution of the
|
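A minimal sketch (not the production Balancer from the diff above) of the behaviour the new test exercises: endpoints whose circuit breaker reports them unusable are skipped, and a node that recovers is yielded again on the next call. The Endpoint/Balancer names and fields here are illustrative assumptions, not the crate's API.

use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

/// Illustrative endpoint: a name plus a "circuit closed" (usable) flag.
struct Endpoint {
    name: &'static str,
    usable: AtomicBool,
}

/// Round-robins over endpoints whose circuit breaker currently reports
/// them usable; unhealthy nodes are skipped, and a node that flips back
/// to usable is yielded again on the next call.
struct Balancer {
    endpoints: Vec<Endpoint>,
    cursor: AtomicUsize,
}

impl Balancer {
    fn next_healthy(&self) -> Option<&Endpoint> {
        let n = self.endpoints.len();
        for _ in 0..n {
            let i = self.cursor.fetch_add(1, Ordering::Relaxed) % n;
            let e = &self.endpoints[i];
            if e.usable.load(Ordering::Relaxed) {
                return Some(e);
            }
        }
        None // no healthy upstreams right now
    }
}

fn main() {
    let balancer = Balancer {
        endpoints: vec![Endpoint { name: "ingester-a", usable: AtomicBool::new(false) }],
        cursor: AtomicUsize::new(0),
    };
    assert!(balancer.next_healthy().is_none()); // unhealthy: nothing yielded
    balancer.endpoints[0].usable.store(true, Ordering::Relaxed); // node recovers
    assert_eq!(balancer.next_healthy().unwrap().name, "ingester-a"); // yielded again
}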
ac656ab1f9563b53b34c2efc2e7b835943e2b8c4
|
Dom Dwyer
|
2023-05-08 12:09:22
|
clearer NoHealthyUpstreams error name
|
Renames NoUpstreams -> NoHealthyUpstreams: the old name was confusing because we
also have a "not enough replicas" error, which could also be read as "no
upstreams". The new name has a slightly clearer meaning.
| null |
refactor: clearer NoHealthyUpstreams error name
Renames NoUpstreams -> NoHealthyUpstreams: the old name was confusing because we
also have a "not enough replicas" error, which could also be read as "no
upstreams". The new name has a slightly clearer meaning.
|
diff --git a/router/src/dml_handlers/rpc_write.rs b/router/src/dml_handlers/rpc_write.rs
index 32f29039b9..5865d1854a 100644
--- a/router/src/dml_handlers/rpc_write.rs
+++ b/router/src/dml_handlers/rpc_write.rs
@@ -46,7 +46,7 @@ pub enum RpcWriteError {
/// There are no healthy ingesters to route a write to.
#[error("no healthy upstream ingesters available")]
- NoUpstreams,
+ NoHealthyUpstreams,
/// The write request was not attempted, because not enough upstream
/// ingesters needed to satisfy the configured replication factor are
@@ -205,7 +205,7 @@ where
let mut snap = self
.endpoints
.endpoints()
- .ok_or(RpcWriteError::NoUpstreams)?;
+ .ok_or(RpcWriteError::NoHealthyUpstreams)?;
// Validate the required number of writes is possible given the current
// number of healthy endpoints.
@@ -234,10 +234,10 @@ where
// This error is an internal implementation detail - the
// meaningful error for the user is "there's no healthy
// upstreams".
- RpcWriteError::Client(_) => RpcWriteError::NoUpstreams,
+ RpcWriteError::Client(_) => RpcWriteError::NoHealthyUpstreams,
// The number of upstreams no longer satisfies the desired
// replication factor.
- RpcWriteError::NoUpstreams => RpcWriteError::NotEnoughReplicas,
+ RpcWriteError::NoHealthyUpstreams => RpcWriteError::NotEnoughReplicas,
// All other errors pass through.
v => v,
}
@@ -284,7 +284,7 @@ where
loop {
match endpoints
.next()
- .ok_or(RpcWriteError::NoUpstreams)?
+ .ok_or(RpcWriteError::NoHealthyUpstreams)?
.write(req.clone())
.await
{
@@ -607,7 +607,7 @@ mod tests {
)
.await;
- assert_matches!(got, Err(RpcWriteError::NoUpstreams));
+ assert_matches!(got, Err(RpcWriteError::NoHealthyUpstreams));
}
/// Assert the error response when the only upstream continuously returns an
@@ -628,7 +628,7 @@ mod tests {
)
.await;
- assert_matches!(got, Err(RpcWriteError::NoUpstreams));
+ assert_matches!(got, Err(RpcWriteError::NoHealthyUpstreams));
}
/// Assert that an [`RpcWriteClientError::UpstreamNotConnected`] error is mapped
@@ -649,7 +649,7 @@ mod tests {
)
.await;
- assert_matches!(got, Err(RpcWriteError::NoUpstreams));
+ assert_matches!(got, Err(RpcWriteError::NoHealthyUpstreams));
}
/// Assert that an error is returned without any RPC request being made when
diff --git a/router/src/server/http.rs b/router/src/server/http.rs
index 8324ccf42e..d5cd70bcb3 100644
--- a/router/src/server/http.rs
+++ b/router/src/server/http.rs
@@ -158,7 +158,7 @@ impl From<&DmlError> for StatusCode {
)) => StatusCode::SERVICE_UNAVAILABLE,
DmlError::RpcWrite(RpcWriteError::Timeout(_)) => StatusCode::GATEWAY_TIMEOUT,
DmlError::RpcWrite(
- RpcWriteError::NoUpstreams
+ RpcWriteError::NoHealthyUpstreams
| RpcWriteError::NotEnoughReplicas
| RpcWriteError::PartialWrite { .. },
) => StatusCode::SERVICE_UNAVAILABLE,
|
6ea8c99c0147705da0761621bc397edbfe5e1f4d
|
Dom Dwyer
|
2023-08-01 17:00:38
|
accessor for table partition proto
|
Allow the Table partition template protobuf to be accessed (if
specified).
| null |
refactor: accessor for table partition proto
Allow the Table partition template protobuf to be accessed (if
specified).
|
diff --git a/data_types/src/partition_template.rs b/data_types/src/partition_template.rs
index ee67cc2b99..cb786a7b15 100644
--- a/data_types/src/partition_template.rs
+++ b/data_types/src/partition_template.rs
@@ -372,6 +372,11 @@ impl TablePartitionTemplateOverride {
})
.unwrap_or_default()
}
+
+ /// Return the protobuf representation of this template.
+ pub fn as_proto(&self) -> Option<&proto::PartitionTemplate> {
+ self.0.as_ref().map(|v| v.inner())
+ }
}
/// This manages the serialization/deserialization of the `proto::PartitionTemplate` type to and
|
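A hedged usage sketch of an accessor like the `as_proto()` added above: callers can borrow the underlying protobuf only when an override was actually specified. The `PartitionTemplate` type and its field below are stand-ins, not the real generated proto type.

/// Stand-in for the generated protobuf type; the real one lives in the
/// generated protobuf crate.
#[derive(Debug, Default)]
struct PartitionTemplate {
    parts: Vec<String>,
}

/// Table-level override that may or may not carry an explicit template.
struct TablePartitionTemplateOverride(Option<PartitionTemplate>);

impl TablePartitionTemplateOverride {
    /// Return the protobuf representation, if one was specified.
    fn as_proto(&self) -> Option<&PartitionTemplate> {
        self.0.as_ref()
    }
}

fn main() {
    let none = TablePartitionTemplateOverride(None);
    assert!(none.as_proto().is_none()); // default template: nothing to expose

    let custom = TablePartitionTemplateOverride(Some(PartitionTemplate {
        parts: vec!["%Y-%m-%d".to_string()],
    }));
    // Borrow the proto only when an override exists.
    assert_eq!(custom.as_proto().unwrap().parts.len(), 1);
}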
7b4a1a06609ac31e9028a6e01eec902c903a3b29
|
Stuart Carnie
|
2023-06-22 12:15:47
|
PR feedback
|
Add tests for fewer rows than N for `moving_average`
|
See: https://github.com/influxdata/influxdb_iox/pull/8023#discussion_r1237298376
|
chore: PR feedback
Add tests for fewer rows than N for `moving_average`
See: https://github.com/influxdata/influxdb_iox/pull/8023#discussion_r1237298376
|
diff --git a/influxdb_iox/tests/query_tests/cases/in/window_like.influxql b/influxdb_iox/tests/query_tests/cases/in/window_like.influxql
index aef59ef8c2..8e2a2f392f 100644
--- a/influxdb_iox/tests/query_tests/cases/in/window_like.influxql
+++ b/influxdb_iox/tests/query_tests/cases/in/window_like.influxql
@@ -42,6 +42,15 @@ SELECT non_negative_difference(mean(usage_idle)) FROM cpu WHERE time >= 00000001
-- source data has gaps
SELECT moving_average(writes, 3) FROM diskio WHERE time >= 0000000130000000000 AND time < 0000000210000000001;
SELECT moving_average(reads, 3) FROM diskio WHERE time >= 0000000130000000000 AND time < 0000000210000000001;
+--
+-- test with fewer rows than 3
+--
+-- one row of input
+SELECT moving_average(writes, 3) FROM diskio WHERE time >= 0000000210000000000;
+-- two rows of input
+SELECT moving_average(writes, 3) FROM diskio WHERE time >= 0000000200000000000;
+-- three rows of input
+SELECT moving_average(writes, 3) FROM diskio WHERE time >= 0000000190000000000;
--
-- moving_average + aggregate
diff --git a/influxdb_iox/tests/query_tests/cases/in/window_like.influxql.expected b/influxdb_iox/tests/query_tests/cases/in/window_like.influxql.expected
index 5ec316b8d7..276a741127 100644
--- a/influxdb_iox/tests/query_tests/cases/in/window_like.influxql.expected
+++ b/influxdb_iox/tests/query_tests/cases/in/window_like.influxql.expected
@@ -225,6 +225,23 @@ name: diskio
| 1970-01-01T00:03:20 | 2593415.3333333335 |
| 1970-01-01T00:03:30 | 2593587.3333333335 |
+---------------------+--------------------+
+-- InfluxQL: SELECT moving_average(writes, 3) FROM diskio WHERE time >= 0000000210000000000;
++------+----------------+
+| time | moving_average |
++------+----------------+
++------+----------------+
+-- InfluxQL: SELECT moving_average(writes, 3) FROM diskio WHERE time >= 0000000200000000000;
++------+----------------+
+| time | moving_average |
++------+----------------+
++------+----------------+
+-- InfluxQL: SELECT moving_average(writes, 3) FROM diskio WHERE time >= 0000000190000000000;
+name: diskio
++---------------------+-------------------+
+| time | moving_average |
++---------------------+-------------------+
+| 1970-01-01T00:03:30 | 5593612.333333333 |
++---------------------+-------------------+
-- InfluxQL: SELECT moving_average(mean(writes), 3) FROM diskio WHERE time >= 0000000130000000000 AND time < 0000000210000000001 GROUP BY time(7s);
name: diskio
+---------------------+-------------------+
|
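The expected output above pins down the semantics under test: with fewer than N input rows, moving_average(_, N) produces no rows at all, and the first value appears only once N samples have accumulated. A small sketch of that windowing rule (assumed semantics for illustration, not the IOx implementation):

/// Moving average over a window of `n` samples: emits nothing until `n`
/// values have accumulated, then one output per subsequent input.
fn moving_average(values: &[f64], n: usize) -> Vec<f64> {
    if n == 0 || values.len() < n {
        return Vec::new(); // fewer rows than N: empty result set
    }
    values
        .windows(n)
        .map(|w| w.iter().sum::<f64>() / n as f64)
        .collect()
}

fn main() {
    assert!(moving_average(&[1.0], 3).is_empty()); // one row of input
    assert!(moving_average(&[1.0, 2.0], 3).is_empty()); // two rows of input
    assert_eq!(moving_average(&[1.0, 2.0, 3.0], 3), vec![2.0]); // three rows -> one value
}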
d2874f5c028eff78edd5c3f3286579d02768bad4
|
Marco Neumann
|
2023-03-16 14:14:08
|
fix chunk boundaries for retention query tests (#7235)
|
See code comment, found while working on #6098.
| null |
test: fix chunk boundaries for retention query tests (#7235)
See code comment, found while working on #6098.
|
diff --git a/influxdb_iox/tests/query_tests2/cases/in/retention.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/retention.sql.expected
index 77e2622229..404a5c634c 100644
--- a/influxdb_iox/tests/query_tests2/cases/in/retention.sql.expected
+++ b/influxdb_iox/tests/query_tests2/cases/in/retention.sql.expected
@@ -3,9 +3,9 @@
+------+------+----------------------+
| host | load | time |
+------+------+----------------------+
-| a | 1.0 | 2022-01-01T01:00:00Z |
-| b | 2.0 | 2022-01-01T01:00:00Z |
-| bb | 21.0 | 2022-01-01T01:00:00Z |
+| a | 1.0 | 2022-01-01T11:00:00Z |
+| b | 2.0 | 2022-01-01T11:00:00Z |
+| bb | 21.0 | 2022-01-01T11:00:00Z |
+------+------+----------------------+
-- SQL: EXPLAIN SELECT * FROM cpu order by host, load, time;
-- Results After Normalizing UUIDs
@@ -31,8 +31,8 @@
+------+------+----------------------+
| host | load | time |
+------+------+----------------------+
-| a | 1.0 | 2022-01-01T01:00:00Z |
-| bb | 21.0 | 2022-01-01T01:00:00Z |
+| a | 1.0 | 2022-01-01T11:00:00Z |
+| bb | 21.0 | 2022-01-01T11:00:00Z |
+------+------+----------------------+
-- SQL: EXPLAIN SELECT * FROM cpu WHERE host != 'b' ORDER BY host,time;
-- Results After Normalizing UUIDs
diff --git a/influxdb_iox/tests/query_tests2/setups.rs b/influxdb_iox/tests/query_tests2/setups.rs
index b3b174bdaf..1a29efa5be 100644
--- a/influxdb_iox/tests/query_tests2/setups.rs
+++ b/influxdb_iox/tests/query_tests2/setups.rs
@@ -1291,7 +1291,12 @@ impl RetentionSetup {
let retention_period_1_hour_ns = 3600 * 1_000_000_000;
// Data is relative to this particular time stamp
- let cutoff = Time::from_rfc3339("2022-01-01T00:00:00+00:00")
+ //
+ // Use a cutoff date that is NOT at the start of the partition so that `lp_partially_inside` only spans a single
+ // partition, not two. This is important because otherwise this will result in two chunks / files, not one.
+ // However a partial inside/outside chunk is important for the query tests so that we can proof that it is not
+ // sufficient to prune the chunks solely on statistics but that there needs to be an actual row-wise filter.
+ let cutoff = Time::from_rfc3339("2022-01-01T10:00:00+00:00")
.unwrap()
.timestamp_nanos();
// Timestamp 1 hour later than the cutoff, so the data will be retained for 1 hour
|
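The comment added in that diff hinges on partition boundaries: with the default day-based partitioning assumed here, a cutoff at 10:00 keeps the "partially inside retention" data within a single day partition (one chunk/file), whereas a midnight cutoff would straddle two. A rough, hedged sketch of that boundary check:

const NANOS_PER_DAY: i64 = 24 * 3600 * 1_000_000_000;

/// Day-based partition key for a nanosecond timestamp (assumed default
/// partitioning scheme, for illustration only).
fn day_partition(ts_nanos: i64) -> i64 {
    ts_nanos.div_euclid(NANOS_PER_DAY)
}

fn main() {
    let hour = 3_600_000_000_000i64;
    // A cutoff 10 hours into a day: data one hour either side of it
    // stays in the same day partition...
    let cutoff = 10 * hour;
    assert_eq!(day_partition(cutoff - hour), day_partition(cutoff + hour));
    // ...whereas a midnight cutoff splits it across two partitions,
    // producing two chunks/files instead of one.
    let midnight_cutoff = 0i64;
    assert_ne!(
        day_partition(midnight_cutoff - hour),
        day_partition(midnight_cutoff + hour)
    );
}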
db24a62658729a7185a346ca423d3eb12ed1a5c9
|
Trevor Hilton
|
2025-01-12 11:40:47
|
change host-id to writer-id (#25804)
|
This changes the CLI arg `host-id` to `writer-id` to more accurately
indicate meaning.
This change also goes through the codebase and changes struct fields,
methods, and variables to use the term `writer_id` or `writer_identifier_prefix`
instead of `host_id` etc., to make the meaning clear in the code.
This also changes the catalog serialization to use the field `writer_id`
instead of `host_id`, which is a breaking change.
| null |
refactor: change host-id to writer-id (#25804)
This changes the CLI arg `host-id` to `writer-id` to more accurately
indicate meaning.
This change also goes through the codebase and changes struct fields,
methods, and variables to use the term `writer_id` or `writer_identifier_prefix`
instead of `host_id` etc., to make the meaning clear in the code.
This also changes the catalog serialization to use the field `writer_id`
instead of `host_id`, which is a breaking change.
|
diff --git a/influxdb3/src/commands/create.rs b/influxdb3/src/commands/create.rs
index ed58827ab1..049b9b1bb2 100644
--- a/influxdb3/src/commands/create.rs
+++ b/influxdb3/src/commands/create.rs
@@ -377,7 +377,7 @@ pub async fn command(config: Config) -> Result<(), Box<dyn Error>> {
"\
Token: {token}\n\
Hashed Token: {hashed}\n\n\
- Start the server with `influxdb3 serve --bearer-token {hashed} --object-store file --data-dir ~/.influxdb3 --host-id YOUR_HOST_NAME`\n\n\
+ Start the server with `influxdb3 serve --bearer-token {hashed} --object-store file --data-dir ~/.influxdb3 --writer-id YOUR_HOST_NAME`\n\n\
HTTP requests require the following header: \"Authorization: Bearer {token}\"\n\
This will grant you access to every HTTP endpoint or deny it otherwise
",
diff --git a/influxdb3/src/commands/serve.rs b/influxdb3/src/commands/serve.rs
index abfdc8f797..e74e600bde 100644
--- a/influxdb3/src/commands/serve.rs
+++ b/influxdb3/src/commands/serve.rs
@@ -240,10 +240,17 @@ pub struct Config {
)]
pub buffer_mem_limit_mb: usize,
- /// The host idendifier used as a prefix in all object store file paths. This should be unique
- /// for any hosts that share the same object store configuration, i.e., the same bucket.
- #[clap(long = "host-id", env = "INFLUXDB3_HOST_IDENTIFIER_PREFIX", action)]
- pub host_identifier_prefix: String,
+ /// The writer idendifier used as a prefix in all object store file paths. This should be unique
+ /// for any InfluxDB 3 Core servers that share the same object store configuration, i.e., the
+ /// same bucket.
+ #[clap(
+ long = "writer-id",
+ // TODO: deprecate this alias in future version
+ alias = "host-id",
+ env = "INFLUXDB3_WRITER_IDENTIFIER_PREFIX",
+ action
+ )]
+ pub writer_identifier_prefix: String,
/// The size of the in-memory Parquet cache in megabytes (MB).
#[clap(
@@ -383,7 +390,7 @@ pub async fn command(config: Config) -> Result<()> {
let num_cpus = num_cpus::get();
let build_malloc_conf = build_malloc_conf();
info!(
- host_id = %config.host_identifier_prefix,
+ writer_id = %config.writer_identifier_prefix,
git_hash = %INFLUXDB3_GIT_HASH as &str,
version = %INFLUXDB3_VERSION.as_ref() as &str,
uuid = %PROCESS_UUID.as_ref() as &str,
@@ -473,7 +480,7 @@ pub async fn command(config: Config) -> Result<()> {
let persister = Arc::new(Persister::new(
Arc::clone(&object_store),
- config.host_identifier_prefix,
+ config.writer_identifier_prefix,
));
let wal_config = WalConfig {
gen1_duration: config.gen1_duration,
diff --git a/influxdb3/src/main.rs b/influxdb3/src/main.rs
index 6cbe80b142..9acdf1d5d2 100644
--- a/influxdb3/src/main.rs
+++ b/influxdb3/src/main.rs
@@ -55,7 +55,7 @@ long_about = r#"InfluxDB 3 Core server and command line tools
Examples:
# Run the InfluxDB 3 Core server
- influxdb3 serve --object-store file --data-dir ~/.influxdb3 --host_id my_host_name
+ influxdb3 serve --object-store file --data-dir ~/.influxdb3 --writer_id my_writer_name
# Display all commands short form
influxdb3 -h
@@ -64,10 +64,10 @@ Examples:
influxdb3 --help
# Run the InfluxDB 3 Core server with extra verbose logging
- influxdb3 serve -v --object-store file --data-dir ~/.influxdb3 --host_id my_host_name
+ influxdb3 serve -v --object-store file --data-dir ~/.influxdb3 --writer_id my_writer_name
# Run InfluxDB 3 Core with full debug logging specified with LOG_FILTER
- LOG_FILTER=debug influxdb3 serve --object-store file --data-dir ~/.influxdb3 --host_id my_host_name
+ LOG_FILTER=debug influxdb3 serve --object-store file --data-dir ~/.influxdb3 --writer_id my_writer_name
"#
)]
struct Config {
diff --git a/influxdb3/tests/server/main.rs b/influxdb3/tests/server/main.rs
index 9ec9641d2c..9c75f589d4 100644
--- a/influxdb3/tests/server/main.rs
+++ b/influxdb3/tests/server/main.rs
@@ -47,7 +47,7 @@ trait ConfigProvider {
#[derive(Debug, Default)]
pub struct TestConfig {
auth_token: Option<(String, String)>,
- host_id: Option<String>,
+ writer_id: Option<String>,
plugin_dir: Option<String>,
}
@@ -63,8 +63,8 @@ impl TestConfig {
}
/// Set a host identifier prefix on the spawned [`TestServer`]
- pub fn with_host_id<S: Into<String>>(mut self, host_id: S) -> Self {
- self.host_id = Some(host_id.into());
+ pub fn with_writer_id<S: Into<String>>(mut self, writer_id: S) -> Self {
+ self.writer_id = Some(writer_id.into());
self
}
@@ -84,8 +84,8 @@ impl ConfigProvider for TestConfig {
if let Some(plugin_dir) = &self.plugin_dir {
args.append(&mut vec!["--plugin-dir".to_string(), plugin_dir.to_owned()]);
}
- args.push("--host-id".to_string());
- if let Some(host) = &self.host_id {
+ args.push("--writer-id".to_string());
+ if let Some(host) = &self.writer_id {
args.push(host.to_owned());
} else {
args.push("test-server".to_string());
diff --git a/influxdb3_cache/src/last_cache/mod.rs b/influxdb3_cache/src/last_cache/mod.rs
index 72bd291626..b75a0bc9d0 100644
--- a/influxdb3_cache/src/last_cache/mod.rs
+++ b/influxdb3_cache/src/last_cache/mod.rs
@@ -1349,9 +1349,9 @@ mod tests {
.insert(table_def.table_id, Arc::new(table_def));
// Create the catalog and clone its InnerCatalog (which is what the LastCacheProvider is
// initialized from):
- let host_id = Arc::from("sample-host-id");
+ let writer_id = Arc::from("sample-host-id");
let instance_id = Arc::from("sample-instance-id");
- let catalog = Catalog::new(host_id, instance_id);
+ let catalog = Catalog::new(writer_id, instance_id);
let db_id = database.id;
catalog.insert_database(database);
let catalog = Arc::new(catalog);
diff --git a/influxdb3_catalog/src/catalog.rs b/influxdb3_catalog/src/catalog.rs
index 3af16ed93c..570baf182e 100644
--- a/influxdb3_catalog/src/catalog.rs
+++ b/influxdb3_catalog/src/catalog.rs
@@ -195,9 +195,9 @@ impl Catalog {
/// Limit for the number of tables across all DBs that InfluxDB 3 Core OSS can have
pub(crate) const NUM_TABLES_LIMIT: usize = 2000;
- pub fn new(host_id: Arc<str>, instance_id: Arc<str>) -> Self {
+ pub fn new(writer_id: Arc<str>, instance_id: Arc<str>) -> Self {
Self {
- inner: RwLock::new(InnerCatalog::new(host_id, instance_id)),
+ inner: RwLock::new(InnerCatalog::new(writer_id, instance_id)),
}
}
@@ -304,8 +304,8 @@ impl Catalog {
Arc::clone(&self.inner.read().instance_id)
}
- pub fn host_id(&self) -> Arc<str> {
- Arc::clone(&self.inner.read().host_id)
+ pub fn writer_id(&self) -> Arc<str> {
+ Arc::clone(&self.inner.read().writer_id)
}
#[cfg(test)]
@@ -364,8 +364,9 @@ pub struct InnerCatalog {
/// The catalog is a map of databases with their table schemas
databases: SerdeVecMap<DbId, Arc<DatabaseSchema>>,
sequence: CatalogSequenceNumber,
- /// The host_id is the prefix that is passed in when starting up (`host_identifier_prefix`)
- host_id: Arc<str>,
+ /// The `writer_id` is the prefix that is passed in when starting up
+ /// (`writer_identifier_prefix`)
+ writer_id: Arc<str>,
/// The instance_id uniquely identifies the instance that generated the catalog
instance_id: Arc<str>,
/// If true, the catalog has been updated since the last time it was serialized
@@ -429,11 +430,11 @@ serde_with::serde_conv!(
);
impl InnerCatalog {
- pub(crate) fn new(host_id: Arc<str>, instance_id: Arc<str>) -> Self {
+ pub(crate) fn new(writer_id: Arc<str>, instance_id: Arc<str>) -> Self {
Self {
databases: SerdeVecMap::new(),
sequence: CatalogSequenceNumber::new(0),
- host_id,
+ writer_id,
instance_id,
updated: false,
db_map: BiHashMap::new(),
@@ -1419,10 +1420,10 @@ mod tests {
#[test]
fn catalog_serialization() {
- let host_id = Arc::from("sample-host-id");
+ let writer_id = Arc::from("sample-host-id");
let instance_id = Arc::from("instance-id");
let cloned_instance_id = Arc::clone(&instance_id);
- let catalog = Catalog::new(host_id, cloned_instance_id);
+ let catalog = Catalog::new(writer_id, cloned_instance_id);
let mut database = DatabaseSchema {
id: DbId::from(0),
name: "test_db".into(),
@@ -1532,7 +1533,7 @@ mod tests {
]
],
"sequence": 0,
- "host_id": "test",
+ "writer_id": "test",
"instance_id": "test",
"db_map": []
}"#;
@@ -1578,7 +1579,7 @@ mod tests {
]
],
"sequence": 0,
- "host_id": "test",
+ "writer_id": "test",
"instance_id": "test",
"db_map": []
}"#;
@@ -1691,9 +1692,9 @@ mod tests {
#[test]
fn serialize_series_keys() {
- let host_id = Arc::from("sample-host-id");
+ let writer_id = Arc::from("sample-host-id");
let instance_id = Arc::from("instance-id");
- let catalog = Catalog::new(host_id, instance_id);
+ let catalog = Catalog::new(writer_id, instance_id);
let mut database = DatabaseSchema {
id: DbId::from(0),
name: "test_db".into(),
@@ -1748,9 +1749,9 @@ mod tests {
#[test]
fn serialize_last_cache() {
- let host_id = Arc::from("sample-host-id");
+ let writer_id = Arc::from("sample-host-id");
let instance_id = Arc::from("instance-id");
- let catalog = Catalog::new(host_id, instance_id);
+ let catalog = Catalog::new(writer_id, instance_id);
let mut database = DatabaseSchema {
id: DbId::from(0),
name: "test_db".into(),
@@ -1814,14 +1815,14 @@ mod tests {
}
#[test]
- fn catalog_instance_and_host_ids() {
- let host_id = Arc::from("sample-host-id");
+ fn catalog_instance_and_writer_ids() {
+ let writer_id = Arc::from("sample-host-id");
let instance_id = Arc::from("sample-instance-id");
- let cloned_host_id = Arc::clone(&host_id);
+ let cloned_writer_id = Arc::clone(&writer_id);
let cloned_instance_id = Arc::clone(&instance_id);
- let catalog = Catalog::new(cloned_host_id, cloned_instance_id);
+ let catalog = Catalog::new(cloned_writer_id, cloned_instance_id);
assert_eq!(instance_id, catalog.instance_id());
- assert_eq!(host_id, catalog.host_id());
+ assert_eq!(writer_id, catalog.writer_id());
}
/// See: https://github.com/influxdata/influxdb/issues/25524
diff --git a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__catalog_serialization.snap b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__catalog_serialization.snap
index 8007db91c7..b98f1bb696 100644
--- a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__catalog_serialization.snap
+++ b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__catalog_serialization.snap
@@ -2,7 +2,6 @@
source: influxdb3_catalog/src/catalog.rs
description: catalog serialization to help catch breaking changes
expression: catalog
-snapshot_kind: text
---
{
"databases": [
@@ -270,7 +269,7 @@ snapshot_kind: text
]
],
"sequence": 0,
- "host_id": "sample-host-id",
+ "writer_id": "sample-host-id",
"instance_id": "instance-id",
"db_map": []
}
diff --git a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_last_cache.snap b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_last_cache.snap
index f461d190ff..86b7d671ac 100644
--- a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_last_cache.snap
+++ b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_last_cache.snap
@@ -2,7 +2,6 @@
source: influxdb3_catalog/src/catalog.rs
description: catalog serialization to help catch breaking changes
expression: catalog
-snapshot_kind: text
---
{
"databases": [
@@ -121,7 +120,7 @@ snapshot_kind: text
]
],
"sequence": 0,
- "host_id": "sample-host-id",
+ "writer_id": "sample-host-id",
"instance_id": "instance-id",
"db_map": []
}
diff --git a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_series_keys.snap b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_series_keys.snap
index 5600f91c22..7bccf9f960 100644
--- a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_series_keys.snap
+++ b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_series_keys.snap
@@ -2,7 +2,6 @@
source: influxdb3_catalog/src/catalog.rs
description: catalog serialization to help catch breaking changes
expression: catalog
-snapshot_kind: text
---
{
"databases": [
@@ -105,7 +104,7 @@ snapshot_kind: text
]
],
"sequence": 0,
- "host_id": "sample-host-id",
+ "writer_id": "sample-host-id",
"instance_id": "instance-id",
"db_map": []
}
diff --git a/influxdb3_server/src/lib.rs b/influxdb3_server/src/lib.rs
index 05be145fec..8999163e31 100644
--- a/influxdb3_server/src/lib.rs
+++ b/influxdb3_server/src/lib.rs
@@ -760,9 +760,9 @@ mod tests {
DedicatedExecutor::new_testing(),
));
let persister = Arc::new(Persister::new(Arc::clone(&object_store), "test_host"));
- let sample_host_id = Arc::from("sample-host-id");
+ let sample_writer_id = Arc::from("sample-host-id");
let instance_id = Arc::from("sample-instance-id");
- let catalog = Arc::new(Catalog::new(sample_host_id, instance_id));
+ let catalog = Arc::new(Catalog::new(sample_writer_id, instance_id));
let write_buffer_impl = influxdb3_write::write_buffer::WriteBufferImpl::new(
influxdb3_write::write_buffer::WriteBufferImplArgs {
persister: Arc::clone(&persister),
diff --git a/influxdb3_server/src/query_executor/mod.rs b/influxdb3_server/src/query_executor/mod.rs
index bbb58029f8..493350431e 100644
--- a/influxdb3_server/src/query_executor/mod.rs
+++ b/influxdb3_server/src/query_executor/mod.rs
@@ -738,9 +738,9 @@ mod tests {
);
let persister = Arc::new(Persister::new(Arc::clone(&object_store), "test_host"));
let exec = make_exec(Arc::clone(&object_store));
- let host_id = Arc::from("sample-host-id");
+ let writer_id = Arc::from("sample-host-id");
let instance_id = Arc::from("instance-id");
- let catalog = Arc::new(Catalog::new(host_id, instance_id));
+ let catalog = Arc::new(Catalog::new(writer_id, instance_id));
let write_buffer_impl = WriteBufferImpl::new(WriteBufferImplArgs {
persister,
catalog: Arc::clone(&catalog),
diff --git a/influxdb3_wal/src/object_store.rs b/influxdb3_wal/src/object_store.rs
index 22d7a43051..2116ac8f00 100644
--- a/influxdb3_wal/src/object_store.rs
+++ b/influxdb3_wal/src/object_store.rs
@@ -20,7 +20,7 @@ use tokio::sync::{oneshot, OwnedSemaphorePermit, Semaphore};
#[derive(Debug)]
pub struct WalObjectStore {
object_store: Arc<dyn ObjectStore>,
- host_identifier_prefix: String,
+ writer_identifier_prefix: String,
file_notifier: Arc<dyn WalFileNotifier>,
added_file_notifiers: parking_lot::Mutex<Vec<Arc<dyn WalFileNotifier>>>,
/// Buffered wal ops go in here along with the state to track when to snapshot
@@ -37,21 +37,21 @@ impl WalObjectStore {
pub async fn new(
time_provider: Arc<dyn TimeProvider>,
object_store: Arc<dyn ObjectStore>,
- host_identifier_prefix: impl Into<String> + Send,
+ writer_identifier_prefix: impl Into<String> + Send,
file_notifier: Arc<dyn WalFileNotifier>,
config: WalConfig,
last_wal_sequence_number: Option<WalFileSequenceNumber>,
last_snapshot_sequence_number: Option<SnapshotSequenceNumber>,
snapshotted_wal_files_to_keep: u64,
) -> Result<Arc<Self>, crate::Error> {
- let host_identifier = host_identifier_prefix.into();
+ let writer_identifier = writer_identifier_prefix.into();
let all_wal_file_paths =
- load_all_wal_file_paths(Arc::clone(&object_store), host_identifier.clone()).await?;
+ load_all_wal_file_paths(Arc::clone(&object_store), writer_identifier.clone()).await?;
let flush_interval = config.flush_interval;
let wal = Self::new_without_replay(
time_provider,
object_store,
- host_identifier,
+ writer_identifier,
file_notifier,
config,
last_wal_sequence_number,
@@ -72,7 +72,7 @@ impl WalObjectStore {
fn new_without_replay(
time_provider: Arc<dyn TimeProvider>,
object_store: Arc<dyn ObjectStore>,
- host_identifier_prefix: impl Into<String>,
+ writer_identifier_prefix: impl Into<String>,
file_notifier: Arc<dyn WalFileNotifier>,
config: WalConfig,
last_wal_sequence_number: Option<WalFileSequenceNumber>,
@@ -85,7 +85,7 @@ impl WalObjectStore {
Self {
object_store,
- host_identifier_prefix: host_identifier_prefix.into(),
+ writer_identifier_prefix: writer_identifier_prefix.into(),
file_notifier,
added_file_notifiers: Default::default(),
flush_buffer: Mutex::new(FlushBuffer::new(
@@ -272,7 +272,7 @@ impl WalObjectStore {
.await
};
info!(
- host = self.host_identifier_prefix,
+ host = self.writer_identifier_prefix,
n_ops = %wal_contents.ops.len(),
min_timestamp_ns = %wal_contents.min_timestamp_ns,
max_timestamp_ns = %wal_contents.max_timestamp_ns,
@@ -280,7 +280,7 @@ impl WalObjectStore {
"flushing WAL buffer to object store"
);
- let wal_path = wal_path(&self.host_identifier_prefix, wal_contents.wal_file_number);
+ let wal_path = wal_path(&self.writer_identifier_prefix, wal_contents.wal_file_number);
let data = crate::serialize::serialize_to_file_bytes(&wal_contents)
.expect("unable to serialize wal contents into bytes for file");
let data = Bytes::from(data);
@@ -373,7 +373,7 @@ impl WalObjectStore {
// that came before it:
if let Some(last_wal_sequence_number) = last_wal_sequence_number {
let last_wal_path =
- wal_path(&self.host_identifier_prefix, last_wal_sequence_number);
+ wal_path(&self.writer_identifier_prefix, last_wal_sequence_number);
debug!(
?path,
?last_wal_path,
@@ -422,7 +422,7 @@ impl WalObjectStore {
for idx in oldest..last_to_delete {
let path = wal_path(
- &self.host_identifier_prefix,
+ &self.writer_identifier_prefix,
WalFileSequenceNumber::new(idx),
);
debug!(?path, ">>> deleting wal file");
@@ -471,11 +471,11 @@ fn oldest_wal_file_num(all_wal_file_paths: &[Path]) -> Option<WalFileSequenceNum
async fn load_all_wal_file_paths(
object_store: Arc<dyn ObjectStore>,
- host_identifier_prefix: String,
+ writer_identifier_prefix: String,
) -> Result<Vec<Path>, crate::Error> {
let mut paths = Vec::new();
let mut offset: Option<Path> = None;
- let path = Path::from(format!("{host}/wal", host = host_identifier_prefix));
+ let path = Path::from(format!("{writer}/wal", writer = writer_identifier_prefix));
loop {
let mut listing = if let Some(offset) = offset {
object_store.list_with_offset(Some(&path), &offset)
@@ -807,9 +807,9 @@ impl WalBuffer {
}
}
-pub fn wal_path(host_identifier_prefix: &str, wal_file_number: WalFileSequenceNumber) -> Path {
+pub fn wal_path(writer_identifier_prefix: &str, wal_file_number: WalFileSequenceNumber) -> Path {
Path::from(format!(
- "{host_identifier_prefix}/wal/{:011}.wal",
+ "{writer_identifier_prefix}/wal/{:011}.wal",
wal_file_number.0
))
}
diff --git a/influxdb3_write/src/lib.rs b/influxdb3_write/src/lib.rs
index d6faca0c96..6bf411e348 100644
--- a/influxdb3_write/src/lib.rs
+++ b/influxdb3_write/src/lib.rs
@@ -186,8 +186,8 @@ pub struct BufferedWriteRequest {
/// The collection of Parquet files that were persisted in a snapshot
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
pub struct PersistedSnapshot {
- /// The host identifier that persisted this snapshot
- pub host_id: String,
+ /// The writer identifier that persisted this snapshot
+ pub writer_id: String,
/// The next file id to be used with `ParquetFile`s when the snapshot is loaded
pub next_file_id: ParquetFileId,
/// The next db id to be used for databases when the snapshot is loaded
@@ -217,13 +217,13 @@ pub struct PersistedSnapshot {
impl PersistedSnapshot {
pub fn new(
- host_id: String,
+ writer_id: String,
snapshot_sequence_number: SnapshotSequenceNumber,
wal_file_sequence_number: WalFileSequenceNumber,
catalog_sequence_number: CatalogSequenceNumber,
) -> Self {
Self {
- host_id,
+ writer_id,
next_file_id: ParquetFileId::next_id(),
next_db_id: DbId::next_id(),
next_table_id: TableId::next_id(),
@@ -500,7 +500,7 @@ mod tests {
// add dbs_1 to snapshot
let persisted_snapshot_1 = PersistedSnapshot {
- host_id: host.to_string(),
+ writer_id: host.to_string(),
next_file_id: ParquetFileId::from(0),
next_db_id: DbId::from(1),
next_table_id: TableId::from(1),
@@ -545,7 +545,7 @@ mod tests {
// add dbs_2 to snapshot
let persisted_snapshot_2 = PersistedSnapshot {
- host_id: host.to_string(),
+ writer_id: host.to_string(),
next_file_id: ParquetFileId::from(5),
next_db_id: DbId::from(2),
next_table_id: TableId::from(22),
diff --git a/influxdb3_write/src/persister.rs b/influxdb3_write/src/persister.rs
index c0153f2d7b..1c71626314 100644
--- a/influxdb3_write/src/persister.rs
+++ b/influxdb3_write/src/persister.rs
@@ -84,19 +84,19 @@ pub struct Persister {
/// The interface to the object store being used
object_store: Arc<dyn ObjectStore>,
/// Prefix used for all paths in the object store for this persister
- host_identifier_prefix: String,
+ writer_identifier_prefix: String,
pub(crate) mem_pool: Arc<dyn MemoryPool>,
}
impl Persister {
pub fn new(
object_store: Arc<dyn ObjectStore>,
- host_identifier_prefix: impl Into<String>,
+ writer_identifier_prefix: impl Into<String>,
) -> Self {
Self {
object_store_url: ObjectStoreUrl::parse(DEFAULT_OBJECT_STORE_URL).unwrap(),
object_store,
- host_identifier_prefix: host_identifier_prefix.into(),
+ writer_identifier_prefix: writer_identifier_prefix.into(),
mem_pool: Arc::new(UnboundedMemoryPool::default()),
}
}
@@ -114,8 +114,8 @@ impl Persister {
}
/// Get the host identifier prefix
- pub fn host_identifier_prefix(&self) -> &str {
- &self.host_identifier_prefix
+ pub fn writer_identifier_prefix(&self) -> &str {
+ &self.writer_identifier_prefix
}
/// Try loading the catalog, if there is no catalog generate new
@@ -127,8 +127,10 @@ impl Persister {
let uuid = Uuid::new_v4().to_string();
let instance_id = Arc::from(uuid.as_str());
info!(instance_id = ?instance_id, "Catalog not found, creating new instance id");
- let new_catalog =
- Catalog::new(Arc::from(self.host_identifier_prefix.as_str()), instance_id);
+ let new_catalog = Catalog::new(
+ Arc::from(self.writer_identifier_prefix.as_str()),
+ instance_id,
+ );
self.persist_catalog(&new_catalog).await?;
new_catalog
}
@@ -142,7 +144,7 @@ impl Persister {
pub async fn load_catalog(&self) -> Result<Option<InnerCatalog>> {
let mut list = self
.object_store
- .list(Some(&CatalogFilePath::dir(&self.host_identifier_prefix)));
+ .list(Some(&CatalogFilePath::dir(&self.writer_identifier_prefix)));
let mut catalog_path: Option<ObjPath> = None;
while let Some(item) = list.next().await {
let item = item?;
@@ -202,12 +204,12 @@ impl Persister {
let mut snapshot_list = if let Some(offset) = offset {
self.object_store.list_with_offset(
- Some(&SnapshotInfoFilePath::dir(&self.host_identifier_prefix)),
+ Some(&SnapshotInfoFilePath::dir(&self.writer_identifier_prefix)),
&offset,
)
} else {
self.object_store.list(Some(&SnapshotInfoFilePath::dir(
- &self.host_identifier_prefix,
+ &self.writer_identifier_prefix,
)))
};
@@ -271,7 +273,7 @@ impl Persister {
/// be the catalog that is returned the next time `load_catalog` is called.
pub async fn persist_catalog(&self, catalog: &Catalog) -> Result<()> {
let catalog_path = CatalogFilePath::new(
- self.host_identifier_prefix.as_str(),
+ self.writer_identifier_prefix.as_str(),
catalog.sequence_number(),
);
let json = serde_json::to_vec_pretty(&catalog)?;
@@ -280,7 +282,7 @@ impl Persister {
.await?;
// It's okay if this fails as it's just cleanup of the old catalog
// a new persist will come in and clean the old one up.
- let prefix = self.host_identifier_prefix.clone();
+ let prefix = self.writer_identifier_prefix.clone();
let obj_store = Arc::clone(&self.object_store);
tokio::spawn(async move {
let mut items = Vec::new();
@@ -309,7 +311,7 @@ impl Persister {
/// Persists the snapshot file
pub async fn persist_snapshot(&self, persisted_snapshot: &PersistedSnapshot) -> Result<()> {
let snapshot_file_path = SnapshotInfoFilePath::new(
- self.host_identifier_prefix.as_str(),
+ self.writer_identifier_prefix.as_str(),
persisted_snapshot.snapshot_sequence_number,
);
let json = serde_json::to_vec_pretty(persisted_snapshot)?;
@@ -456,12 +458,12 @@ mod tests {
#[tokio::test]
async fn persist_catalog() {
- let host_id = Arc::from("sample-host-id");
+ let writer_id = Arc::from("sample-host-id");
let instance_id = Arc::from("sample-instance-id");
let local_disk =
LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap();
let persister = Persister::new(Arc::new(local_disk), "test_host");
- let catalog = Catalog::new(host_id, instance_id);
+ let catalog = Catalog::new(writer_id, instance_id);
let _ = catalog.db_or_create("my_db");
persister.persist_catalog(&catalog).await.unwrap();
@@ -469,13 +471,13 @@ mod tests {
#[tokio::test]
async fn persist_catalog_with_cleanup() {
- let host_id = Arc::from("sample-host-id");
+ let writer_id = Arc::from("sample-host-id");
let instance_id = Arc::from("sample-instance-id");
let prefix = test_helpers::tmp_dir().unwrap();
let local_disk = LocalFileSystem::new_with_prefix(prefix).unwrap();
let obj_store: Arc<dyn ObjectStore> = Arc::new(local_disk);
let persister = Persister::new(Arc::clone(&obj_store), "test_host");
- let catalog = Catalog::new(Arc::clone(&host_id), instance_id);
+ let catalog = Catalog::new(Arc::clone(&writer_id), instance_id);
persister.persist_catalog(&catalog).await.unwrap();
let db_schema = catalog.db_or_create("my_db_1").unwrap();
persister.persist_catalog(&catalog).await.unwrap();
@@ -587,17 +589,17 @@ mod tests {
#[tokio::test]
async fn persist_and_load_newest_catalog() {
- let host_id: Arc<str> = Arc::from("sample-host-id");
+ let writer_id: Arc<str> = Arc::from("sample-host-id");
let instance_id: Arc<str> = Arc::from("sample-instance-id");
let local_disk =
LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap();
let persister = Persister::new(Arc::new(local_disk), "test_host");
- let catalog = Catalog::new(host_id.clone(), instance_id.clone());
+ let catalog = Catalog::new(writer_id.clone(), instance_id.clone());
let _ = catalog.db_or_create("my_db");
persister.persist_catalog(&catalog).await.unwrap();
- let catalog = Catalog::new(host_id.clone(), instance_id.clone());
+ let catalog = Catalog::new(writer_id.clone(), instance_id.clone());
let _ = catalog.db_or_create("my_second_db");
persister.persist_catalog(&catalog).await.unwrap();
@@ -620,7 +622,7 @@ mod tests {
LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap();
let persister = Persister::new(Arc::new(local_disk), "test_host");
let info_file = PersistedSnapshot {
- host_id: "test_host".to_string(),
+ writer_id: "test_host".to_string(),
next_file_id: ParquetFileId::from(0),
next_db_id: DbId::from(1),
next_table_id: TableId::from(1),
@@ -644,7 +646,7 @@ mod tests {
LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap();
let persister = Persister::new(Arc::new(local_disk), "test_host");
let info_file = PersistedSnapshot {
- host_id: "test_host".to_string(),
+ writer_id: "test_host".to_string(),
next_file_id: ParquetFileId::from(0),
next_db_id: DbId::from(1),
next_table_id: TableId::from(1),
@@ -659,7 +661,7 @@ mod tests {
parquet_size_bytes: 0,
};
let info_file_2 = PersistedSnapshot {
- host_id: "test_host".to_string(),
+ writer_id: "test_host".to_string(),
next_file_id: ParquetFileId::from(1),
next_db_id: DbId::from(1),
next_table_id: TableId::from(1),
@@ -674,7 +676,7 @@ mod tests {
parquet_size_bytes: 0,
};
let info_file_3 = PersistedSnapshot {
- host_id: "test_host".to_string(),
+ writer_id: "test_host".to_string(),
next_file_id: ParquetFileId::from(2),
next_db_id: DbId::from(1),
next_table_id: TableId::from(1),
@@ -710,7 +712,7 @@ mod tests {
LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap();
let persister = Persister::new(Arc::new(local_disk), "test_host");
let info_file = PersistedSnapshot {
- host_id: "test_host".to_string(),
+ writer_id: "test_host".to_string(),
next_file_id: ParquetFileId::from(0),
next_db_id: DbId::from(1),
next_table_id: TableId::from(1),
@@ -739,7 +741,7 @@ mod tests {
let persister = Persister::new(Arc::new(local_disk), "test_host");
for id in 0..1001 {
let info_file = PersistedSnapshot {
- host_id: "test_host".to_string(),
+ writer_id: "test_host".to_string(),
next_file_id: ParquetFileId::from(id),
next_db_id: DbId::from(1),
next_table_id: TableId::from(1),
@@ -866,7 +868,7 @@ mod tests {
]
.into();
let snapshot = PersistedSnapshot {
- host_id: "host".to_string(),
+ writer_id: "host".to_string(),
next_file_id: ParquetFileId::new(),
next_db_id: DbId::new(),
next_table_id: TableId::new(),
@@ -971,7 +973,7 @@ mod tests {
{
"databases": [],
"sequence": 0,
- "host_id": "test_host",
+ "writer_id": "test_host",
"instance_id": "24b1e1bf-b301-4101-affa-e3d668fe7d20",
"db_map": [],
"table_map": []
diff --git a/influxdb3_write/src/snapshots/influxdb3_write__persister__tests__persisted_snapshot_structure.snap b/influxdb3_write/src/snapshots/influxdb3_write__persister__tests__persisted_snapshot_structure.snap
index 6df66c1e61..137a11b737 100644
--- a/influxdb3_write/src/snapshots/influxdb3_write__persister__tests__persisted_snapshot_structure.snap
+++ b/influxdb3_write/src/snapshots/influxdb3_write__persister__tests__persisted_snapshot_structure.snap
@@ -3,7 +3,7 @@ source: influxdb3_write/src/persister.rs
expression: snapshot
---
{
- "host_id": "host",
+ "writer_id": "host",
"next_file_id": 8,
"next_db_id": 2,
"next_table_id": 4,
diff --git a/influxdb3_write/src/write_buffer/mod.rs b/influxdb3_write/src/write_buffer/mod.rs
index 7e7cb00739..5a897ed161 100644
--- a/influxdb3_write/src/write_buffer/mod.rs
+++ b/influxdb3_write/src/write_buffer/mod.rs
@@ -216,7 +216,7 @@ impl WriteBufferImpl {
let wal = WalObjectStore::new(
Arc::clone(&time_provider),
persister.object_store(),
- persister.host_identifier_prefix(),
+ persister.writer_identifier_prefix(),
Arc::clone(&queryable_buffer) as Arc<dyn WalFileNotifier>,
wal_config,
last_wal_sequence_number,
@@ -883,9 +883,9 @@ mod tests {
#[test]
fn parse_lp_into_buffer() {
- let host_id = Arc::from("sample-host-id");
+ let writer_id = Arc::from("sample-host-id");
let instance_id = Arc::from("sample-instance-id");
- let catalog = Arc::new(Catalog::new(host_id, instance_id));
+ let catalog = Arc::new(Catalog::new(writer_id, instance_id));
let db_name = NamespaceName::new("foo").unwrap();
let lp = "cpu,region=west user=23.2 100\nfoo f1=1i";
WriteValidator::initialize(db_name, Arc::clone(&catalog), 0)
diff --git a/influxdb3_write/src/write_buffer/queryable_buffer.rs b/influxdb3_write/src/write_buffer/queryable_buffer.rs
index dd5dee333f..dcb1c1c50b 100644
--- a/influxdb3_write/src/write_buffer/queryable_buffer.rs
+++ b/influxdb3_write/src/write_buffer/queryable_buffer.rs
@@ -209,7 +209,7 @@ impl QueryableBuffer {
table_name: Arc::clone(&table_name),
chunk_time: chunk.chunk_time,
path: ParquetFilePath::new(
- self.persister.host_identifier_prefix(),
+ self.persister.writer_identifier_prefix(),
db_schema.name.as_ref(),
database_id.as_u32(),
table_name.as_ref(),
@@ -277,7 +277,7 @@ impl QueryableBuffer {
);
// persist the individual files, building the snapshot as we go
let mut persisted_snapshot = PersistedSnapshot::new(
- persister.host_identifier_prefix().to_string(),
+ persister.writer_identifier_prefix().to_string(),
snapshot_details.snapshot_sequence_number,
snapshot_details.last_wal_sequence_number,
catalog.sequence_number(),
diff --git a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap
index 9cceaf121c..a8b14fd8e0 100644
--- a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap
+++ b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap
@@ -1,7 +1,6 @@
---
source: influxdb3_write/src/write_buffer/mod.rs
expression: catalog_json
-snapshot_kind: text
---
{
"databases": [
@@ -100,7 +99,7 @@ snapshot_kind: text
"name": "db"
}
],
- "host_id": "test_host",
"instance_id": "[uuid]",
- "sequence": 3
+ "sequence": 3,
+ "writer_id": "test_host"
}
diff --git a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap
index 9704b561c6..5734704063 100644
--- a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap
+++ b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap
@@ -1,7 +1,6 @@
---
source: influxdb3_write/src/write_buffer/mod.rs
expression: catalog_json
-snapshot_kind: text
---
{
"databases": [
@@ -90,7 +89,7 @@ snapshot_kind: text
"name": "db"
}
],
- "host_id": "test_host",
"instance_id": "[uuid]",
- "sequence": 2
+ "sequence": 2,
+ "writer_id": "test_host"
}
diff --git a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap
index 82f1da254a..131742beef 100644
--- a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap
+++ b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap
@@ -1,7 +1,6 @@
---
source: influxdb3_write/src/write_buffer/mod.rs
expression: catalog_json
-snapshot_kind: text
---
{
"databases": [
@@ -87,7 +86,7 @@ snapshot_kind: text
"name": "db"
}
],
- "host_id": "test_host",
"instance_id": "[uuid]",
- "sequence": 4
+ "sequence": 4,
+ "writer_id": "test_host"
}
diff --git a/influxdb3_write/src/write_buffer/validator.rs b/influxdb3_write/src/write_buffer/validator.rs
index 430478d2ab..e602325ef1 100644
--- a/influxdb3_write/src/write_buffer/validator.rs
+++ b/influxdb3_write/src/write_buffer/validator.rs
@@ -508,10 +508,10 @@ mod tests {
#[test]
fn write_validator_v1() -> Result<(), Error> {
- let host_id = Arc::from("sample-host-id");
+ let writer_id = Arc::from("sample-host-id");
let instance_id = Arc::from("sample-instance-id");
let namespace = NamespaceName::new("test").unwrap();
- let catalog = Arc::new(Catalog::new(host_id, instance_id));
+ let catalog = Arc::new(Catalog::new(writer_id, instance_id));
let result = WriteValidator::initialize(namespace.clone(), Arc::clone(&catalog), 0)
.unwrap()
.v1_parse_lines_and_update_schema(
|
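One detail worth noting in the serve.rs hunk above is how backwards compatibility is preserved for the renamed flag: clap's `alias` lets `--host-id` keep working while `--writer-id` becomes the documented name. A minimal hedged sketch of that pattern (flag names taken from the diff; the struct, field, and omitted env binding are illustrative):

use clap::Parser;

#[derive(Debug, Parser)]
struct Config {
    /// Prefix used in all object store paths; must be unique per server
    /// sharing a bucket. The real flag also binds an env var, omitted here.
    #[clap(
        long = "writer-id",
        // Old spelling still accepted so existing deployments keep working.
        alias = "host-id"
    )]
    writer_identifier_prefix: String,
}

fn main() {
    // Both spellings parse into the same field.
    let new = Config::parse_from(["serve", "--writer-id", "my_writer"]);
    let old = Config::parse_from(["serve", "--host-id", "my_writer"]);
    assert_eq!(new.writer_identifier_prefix, old.writer_identifier_prefix);
}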
bbfff8699c010a8496f3797a253826936b00a2c0
|
Carol (Nichols || Goulding)
|
2023-02-27 11:57:44
|
Use the same normalization code for explain tests as e2e tests do
|
The regex for replacing UUIDs needed to change in the same way the normalizer's
regex did, so keep them in sync by using the same code.
This might point to the normalizer needing to be moved somewhere else,
or changing these tests to be e2e?
| null |
fix: Use the same normalization code for explain tests as e2e tests do
The regex for replacing UUIDs needed to change in the same way the normalizer's
regex did, so keep them in sync by using the same code.
This might point to the normalizer needing to be moved somewhere else,
or changing these tests to be e2e?
|
diff --git a/Cargo.lock b/Cargo.lock
index e383b52036..8e4eb90655 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4513,7 +4513,6 @@ dependencies = [
"predicate",
"prost",
"rand",
- "regex",
"schema",
"service_common",
"service_grpc_catalog",
@@ -4522,6 +4521,7 @@ dependencies = [
"sharder",
"snafu",
"test_helpers",
+ "test_helpers_end_to_end",
"thiserror",
"tokio",
"tokio-util",
diff --git a/querier/Cargo.toml b/querier/Cargo.toml
index 5bdb8d69c1..90689d93d1 100644
--- a/querier/Cargo.toml
+++ b/querier/Cargo.toml
@@ -56,5 +56,5 @@ insta = { version = "1.28.0", features = ["yaml"] }
iox_tests = { path = "../iox_tests" }
mutable_batch_lp = { path = "../mutable_batch_lp" }
object_store_metrics = { path = "../object_store_metrics" }
-regex = "1.7.1"
test_helpers = { path = "../test_helpers" }
+test_helpers_end_to_end = { path = "../test_helpers_end_to_end" }
diff --git a/querier/src/namespace/query_access.rs b/querier/src/namespace/query_access.rs
index 36a11f0f5c..e7221686a5 100644
--- a/querier/src/namespace/query_access.rs
+++ b/querier/src/namespace/query_access.rs
@@ -205,8 +205,8 @@ mod tests {
use iox_query::frontend::sql::SqlQueryPlanner;
use iox_tests::{TestCatalog, TestParquetFileBuilder};
use metric::{Observation, RawReporter};
- use regex::Regex;
use snafu::{ResultExt, Snafu};
+ use test_helpers_end_to_end::snapshot_comparison::normalization::Normalizer;
use trace::{span::SpanStatus, RingBufferTraceCollector};
#[tokio::test]
@@ -492,13 +492,13 @@ mod tests {
format_explain(&querier_namespace, "EXPLAIN SELECT * FROM cpu").await,
@r###"
---
- - +---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
- - "| plan_type | plan |"
- - +---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
- - "| logical_plan | TableScan: cpu projection=[foo, host, load, time] |"
- - "| physical_plan | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/2/2/<uuid>.parquet, 1/1/1/3/<uuid>.parquet]]}, projection=[foo, host, load, time] |"
- - "| | |"
- - +---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+ - "----------"
+ - "| plan_type | plan |"
+ - "----------"
+ - "| logical_plan | TableScan: cpu projection=[foo, host, load, time] |"
+ - "| physical_plan | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000001.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000004.parquet]]}, projection=[foo, host, load, time] |"
+ - "| | |"
+ - "----------"
"###
);
@@ -509,22 +509,22 @@ mod tests {
format_explain(&querier_namespace, "EXPLAIN SELECT * FROM mem ORDER BY host,time").await,
@r###"
---
- - +---------------+--------------------------------------------------------------------------------------------------------------------------------------------------+
- - "| plan_type | plan |"
- - +---------------+--------------------------------------------------------------------------------------------------------------------------------------------------+
- - "| logical_plan | Sort: mem.host ASC NULLS LAST, mem.time ASC NULLS LAST |"
- - "| | TableScan: mem projection=[host, perc, time] |"
- - "| physical_plan | SortExec: expr=[host@0 ASC NULLS LAST,time@2 ASC NULLS LAST] |"
- - "| | CoalescePartitionsExec |"
- - "| | UnionExec |"
- - "| | CoalesceBatchesExec: target_batch_size=8192 |"
- - "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |"
- - "| | ParquetExec: limit=None, partitions={1 group: [[1/2/1/4/<uuid>.parquet]]}, projection=[host, perc, time] |"
- - "| | CoalesceBatchesExec: target_batch_size=8192 |"
- - "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |"
- - "| | ParquetExec: limit=None, partitions={1 group: [[1/2/1/4/<uuid>.parquet]]}, projection=[host, perc, time] |"
- - "| | |"
- - +---------------+--------------------------------------------------------------------------------------------------------------------------------------------------+
+ - "----------"
+ - "| plan_type | plan |"
+ - "----------"
+ - "| logical_plan | Sort: mem.host ASC NULLS LAST, mem.time ASC NULLS LAST |"
+ - "| | TableScan: mem projection=[host, perc, time] |"
+ - "| physical_plan | SortExec: expr=[host@0 ASC NULLS LAST,time@2 ASC NULLS LAST] |"
+ - "| | CoalescePartitionsExec |"
+ - "| | UnionExec |"
+ - "| | CoalesceBatchesExec: target_batch_size=8192 |"
+ - "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |"
+ - "| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, projection=[host, perc, time] |"
+ - "| | CoalesceBatchesExec: target_batch_size=8192 |"
+ - "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |"
+ - "| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, projection=[host, perc, time] |"
+ - "| | |"
+ - "----------"
"###
);
@@ -567,19 +567,19 @@ mod tests {
format_explain(&querier_namespace, "EXPLAIN SELECT * FROM cpu").await,
@r###"
---
- - +---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
- - "| plan_type | plan |"
- - +---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
- - "| logical_plan | TableScan: cpu projection=[foo, host, load, time] |"
- - "| physical_plan | UnionExec |"
- - "| | DeduplicateExec: [host@1 ASC,time@3 ASC] |"
- - "| | SortPreservingMergeExec: [host@1 ASC,time@3 ASC] |"
- - "| | UnionExec |"
- - "| | ParquetExec: limit=None, partitions={1 group: [[1/1/2/2/<uuid>.parquet]]}, output_ordering=[host@1 ASC, time@3 ASC], projection=[foo, host, load, time] |"
- - "| | ParquetExec: limit=None, partitions={1 group: [[1/1/2/2/<uuid>.parquet]]}, output_ordering=[host@1 ASC, time@3 ASC], projection=[foo, host, load, time] |"
- - "| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/3/<uuid>.parquet]]}, projection=[foo, host, load, time] |"
- - "| | |"
- - +---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+ - "----------"
+ - "| plan_type | plan |"
+ - "----------"
+ - "| logical_plan | TableScan: cpu projection=[foo, host, load, time] |"
+ - "| physical_plan | UnionExec |"
+ - "| | DeduplicateExec: [host@1 ASC,time@3 ASC] |"
+ - "| | SortPreservingMergeExec: [host@1 ASC,time@3 ASC] |"
+ - "| | UnionExec |"
+ - "| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[host@1 ASC, time@3 ASC], projection=[foo, host, load, time] |"
+ - "| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[host@1 ASC, time@3 ASC], projection=[foo, host, load, time] |"
+ - "| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000004.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000005.parquet]]}, projection=[foo, host, load, time] |"
+ - "| | |"
+ - "----------"
"###
);
}
@@ -599,15 +599,11 @@ mod tests {
async fn format_explain(querier_namespace: &Arc<QuerierNamespace>, sql: &str) -> Vec<String> {
let results = run(querier_namespace, sql, None).await;
- let formatted = arrow_util::display::pretty_format_batches(&results).unwrap();
-
- let regex = Regex::new("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}")
- .expect("UUID regex");
- formatted
- .trim()
- .split('\n')
- .map(|s| regex.replace_all(s, "<uuid>").to_string())
- .collect::<Vec<_>>()
+ let normalizer = Normalizer {
+ normalized_uuids: true,
+ ..Default::default()
+ };
+ normalizer.normalize_results(results)
}
async fn run(
diff --git a/test_helpers_end_to_end/src/lib.rs b/test_helpers_end_to_end/src/lib.rs
index 152e4124e7..6e58e27f64 100644
--- a/test_helpers_end_to_end/src/lib.rs
+++ b/test_helpers_end_to_end/src/lib.rs
@@ -13,7 +13,7 @@ mod grpc;
mod mini_cluster;
mod server_fixture;
mod server_type;
-mod snapshot_comparison;
+pub mod snapshot_comparison;
mod steps;
mod udp_listener;
diff --git a/test_helpers_end_to_end/src/snapshot_comparison.rs b/test_helpers_end_to_end/src/snapshot_comparison.rs
index 118b33db54..4ec43d2c6f 100644
--- a/test_helpers_end_to_end/src/snapshot_comparison.rs
+++ b/test_helpers_end_to_end/src/snapshot_comparison.rs
@@ -1,4 +1,4 @@
-mod normalization;
+pub mod normalization;
mod queries;
use crate::snapshot_comparison::queries::TestQueries;
|
4521516147792fe5f8d8efe3bdd4dc2741345a6c
|
Marco Neumann
|
2023-01-24 17:53:47
|
add per-partition timeout (#6686)
|
It seems that prod was hanging last night. This is pretty hard to debug
and in general we should protect the compactor against hanging /
malformed partitions that take forever. This is similar to the fact that
the querier also has a timeout for every query. Let's see if this shows
anything in prod (and if not it's still a desired safety net).
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
feat: add per-partition timeout (#6686)
It seems that prod was hanging last night. This is pretty hard to debug
and in general we should protect the compactor against hanging /
malformed partitions that take forever. This is similar to the fact that
the querier also has a timeout for every query. Let's see if this shows
anything in prod (and if not it's still a desired safety net).
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
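As a hedged illustration of the mechanism described above (not the compactor's actual code), the sketch below wraps a per-partition future in `tokio::time::timeout`, mirroring the approach the driver.rs hunk in the diff below takes; `compact_one_partition` is a hypothetical stand-in for the real work.

use std::time::Duration;

/// Hypothetical stand-in for the real per-partition compaction work.
async fn compact_one_partition(partition_id: i64) -> Result<(), String> {
    let _ = partition_id;
    Ok(())
}

/// Bound the per-partition work with a deadline so a single hanging or
/// malformed partition cannot stall the whole compaction loop.
async fn compact_with_timeout(partition_id: i64, timeout: Duration) -> Result<(), String> {
    match tokio::time::timeout(timeout, compact_one_partition(partition_id)).await {
        // Inner result: the work finished before the deadline.
        Ok(res) => res,
        // Outer error: the deadline elapsed first.
        Err(_elapsed) => Err(format!("partition {partition_id} timed out")),
    }
}

The real driver additionally boxes the `Elapsed` error into the shared error type so it can be recorded against the partition and classified as `ErrorKind::Timeout`, as the error.rs hunk below shows.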
|
diff --git a/clap_blocks/src/compactor2.rs b/clap_blocks/src/compactor2.rs
index cb2fc27baf..91d6582ad9 100644
--- a/clap_blocks/src/compactor2.rs
+++ b/clap_blocks/src/compactor2.rs
@@ -85,4 +85,13 @@ pub struct Compactor2Config {
action
)]
pub split_percentage: u16,
+
+ /// Maximum duration of the per-partition compaction task in seconds.
+ #[clap(
+ long = "compaction-partition-timeout-secs",
+ env = "INFLUXDB_IOX_COMPACTION_PARTITION_TIMEOUT_SECS",
+ default_value = "1800",
+ action
+ )]
+ pub partition_timeout_secs: u64,
}
diff --git a/compactor2/src/compactor.rs b/compactor2/src/compactor.rs
index 73e47fdd01..1bb0bbeffb 100644
--- a/compactor2/src/compactor.rs
+++ b/compactor2/src/compactor.rs
@@ -1,5 +1,5 @@
//! Main compactor entry point.
-use std::sync::Arc;
+use std::{sync::Arc, time::Duration};
use futures::{
future::{BoxFuture, Shared},
@@ -44,7 +44,7 @@ impl Compactor2 {
_ = shutdown_captured.cancelled() => {}
_ = async {
loop {
- compact(config.partition_concurrency, &components).await;
+ compact(config.partition_concurrency, Duration::from_secs(config.partition_timeout_secs), &components).await;
// TODO: implement throttling if there was no work to do
}
} => unreachable!(),
diff --git a/compactor2/src/compactor_tests.rs b/compactor2/src/compactor_tests.rs
index 67991daf9d..620f59a065 100644
--- a/compactor2/src/compactor_tests.rs
+++ b/compactor2/src/compactor_tests.rs
@@ -1,6 +1,6 @@
#[cfg(test)]
mod tests {
- use std::{num::NonZeroUsize, sync::Arc};
+ use std::{num::NonZeroUsize, sync::Arc, time::Duration};
use arrow_util::assert_batches_sorted_eq;
use data_types::CompactionLevel;
@@ -22,7 +22,12 @@ mod tests {
// compact
let config = Arc::clone(&setup.config);
let components = hardcoded_components(&config);
- compact(NonZeroUsize::new(10).unwrap(), &components).await;
+ compact(
+ NonZeroUsize::new(10).unwrap(),
+ Duration::from_secs(3_6000),
+ &components,
+ )
+ .await;
// verify catalog is still empty
let files = setup.list_by_table_not_to_delete().await;
@@ -60,7 +65,12 @@ mod tests {
// compact
let config = Arc::clone(&setup.config);
let components = hardcoded_components(&config);
- compact(NonZeroUsize::new(10).unwrap(), &components).await;
+ compact(
+ NonZeroUsize::new(10).unwrap(),
+ Duration::from_secs(3_6000),
+ &components,
+ )
+ .await;
// verify number of files: 6 files are compacted into 2 files
let files = setup.list_by_table_not_to_delete().await;
@@ -156,7 +166,12 @@ mod tests {
// compact but nothing will be compacted because the partition is skipped
let config = Arc::clone(&setup.config);
let components = hardcoded_components(&config);
- compact(NonZeroUsize::new(10).unwrap(), &components).await;
+ compact(
+ NonZeroUsize::new(10).unwrap(),
+ Duration::from_secs(3_6000),
+ &components,
+ )
+ .await;
// verify still 6 files
let files = setup.list_by_table_not_to_delete().await;
diff --git a/compactor2/src/config.rs b/compactor2/src/config.rs
index c4d6e6eb8f..3e6b5954c0 100644
--- a/compactor2/src/config.rs
+++ b/compactor2/src/config.rs
@@ -57,6 +57,9 @@ pub struct Config {
/// . Any size in the middle will be considered neither too small nor too large
/// This value must be between (0, 100)
pub split_percentage: u16,
+
+ /// Maximum duration of the per-partition compaction task in seconds.
+ pub partition_timeout_secs: u64,
}
impl Config {
@@ -76,6 +79,7 @@ impl Config {
split_percentage: u16,
topic_name: String,
shard_index: i32,
+ partition_timeout_secs: u64,
) -> Self {
// Get shardId from topic and shard_index
// Fetch topic
@@ -130,6 +134,7 @@ impl Config {
percentage_max_file_size,
split_percentage,
shard_id: shard.id,
+ partition_timeout_secs,
}
}
}
diff --git a/compactor2/src/driver.rs b/compactor2/src/driver.rs
index e528ec49d7..d993e0a014 100644
--- a/compactor2/src/driver.rs
+++ b/compactor2/src/driver.rs
@@ -1,4 +1,4 @@
-use std::{num::NonZeroUsize, sync::Arc};
+use std::{num::NonZeroUsize, sync::Arc, time::Duration};
use data_types::{CompactionLevel, PartitionId};
use futures::{stream::FuturesOrdered, StreamExt, TryStreamExt};
@@ -17,22 +17,38 @@ use crate::{components::Components, partition_info::PartitionInfo};
// the other Level-N files. Hence we should never use `created_at` of Level-N+1 files to order
// them with Level-N files.
// . We can only compact different sets of files of the same partition concurrently into the same target_level.
-pub async fn compact(partition_concurrency: NonZeroUsize, components: &Arc<Components>) {
+pub async fn compact(
+ partition_concurrency: NonZeroUsize,
+ partition_timeout: Duration,
+ components: &Arc<Components>,
+) {
let partition_ids = components.partitions_source.fetch().await;
futures::stream::iter(partition_ids)
.map(|partition_id| {
let components = Arc::clone(components);
- compact_partition(partition_id, components)
+ compact_partition(partition_id, partition_timeout, components)
})
.buffer_unordered(partition_concurrency.get())
.collect::<()>()
.await;
}
-async fn compact_partition(partition_id: PartitionId, components: Arc<Components>) {
- let res = try_compact_partition(partition_id, Arc::clone(&components)).await;
+async fn compact_partition(
+ partition_id: PartitionId,
+ partition_timeout: Duration,
+ components: Arc<Components>,
+) {
+ let res = tokio::time::timeout(
+ partition_timeout,
+ try_compact_partition(partition_id, Arc::clone(&components)),
+ )
+ .await;
+ let res = match res {
+ Ok(res) => res,
+ Err(e) => Err(Box::new(e) as _),
+ };
components
.partition_done_sink
.record(partition_id, res)
diff --git a/compactor2/src/error.rs b/compactor2/src/error.rs
index a5c7427b37..ae38d4d1b8 100644
--- a/compactor2/src/error.rs
+++ b/compactor2/src/error.rs
@@ -3,6 +3,7 @@
use datafusion::{arrow::error::ArrowError, error::DataFusionError, parquet::errors::ParquetError};
use object_store::Error as ObjectStoreError;
use std::{error::Error, fmt::Display, sync::Arc};
+use tokio::time::error::Elapsed;
/// What kind of error did we occur during compaction?
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
@@ -19,6 +20,9 @@ pub enum ErrorKind {
/// The compactor shall retry (if possible) with a smaller set of files.
OutOfMemory,
+ /// Partition took too long.
+ Timeout,
+
/// Unknown/unexpected error.
///
/// This will likely mark the affected partition as "skipped" and the compactor will no longer touch it.
@@ -28,7 +32,12 @@ pub enum ErrorKind {
impl ErrorKind {
/// Return all variants.
pub fn variants() -> &'static [Self] {
- &[Self::ObjectStore, Self::OutOfMemory, Self::Unknown]
+ &[
+ Self::ObjectStore,
+ Self::OutOfMemory,
+ Self::Timeout,
+ Self::Unknown,
+ ]
}
/// Return static name.
@@ -36,6 +45,7 @@ impl ErrorKind {
match self {
Self::ObjectStore => "object_store",
Self::OutOfMemory => "out_of_memory",
+ Self::Timeout => "timeout",
Self::Unknown => "unknown",
}
}
@@ -53,33 +63,43 @@ pub trait ErrorKindExt {
fn classify(&self) -> ErrorKind;
}
-impl ErrorKindExt for &ArrowError {
+impl ErrorKindExt for ArrowError {
fn classify(&self) -> ErrorKind {
if let Some(source) = self.source() {
return source.classify();
}
match self {
- ArrowError::ExternalError(e) => e.classify(),
+ Self::ExternalError(e) => e.classify(),
// ArrowError is also mostly broken for many variants
e => try_recover_unknown(e),
}
}
}
-impl ErrorKindExt for &DataFusionError {
+impl ErrorKindExt for DataFusionError {
fn classify(&self) -> ErrorKind {
match self.find_root() {
- DataFusionError::ArrowError(e) => e.classify(),
- DataFusionError::External(e) => e.classify(),
- DataFusionError::ObjectStore(e) => e.classify(),
- DataFusionError::ParquetError(e) => e.classify(),
- DataFusionError::ResourcesExhausted(_) => ErrorKind::OutOfMemory,
+ Self::ArrowError(e) => e.classify(),
+ Self::External(e) => e.classify(),
+ Self::ObjectStore(e) => e.classify(),
+ Self::ParquetError(e) => e.classify(),
+ Self::ResourcesExhausted(_) => ErrorKind::OutOfMemory,
e => try_recover_unknown(e),
}
}
}
+impl ErrorKindExt for Elapsed {
+ fn classify(&self) -> ErrorKind {
+ if let Some(source) = self.source() {
+ return source.classify();
+ }
+
+ ErrorKind::Timeout
+ }
+}
+
impl ErrorKindExt for ObjectStoreError {
fn classify(&self) -> ErrorKind {
if let Some(source) = self.source() {
@@ -115,6 +135,12 @@ macro_rules! dispatch_body {
e.as_ref().classify()
} else if let Some(e) = $self.downcast_ref::<Box<DataFusionError>>() {
e.as_ref().classify()
+ } else if let Some(e) = $self.downcast_ref::<Elapsed>() {
+ e.classify()
+ } else if let Some(e) = $self.downcast_ref::<Arc<Elapsed>>() {
+ e.as_ref().classify()
+ } else if let Some(e) = $self.downcast_ref::<Box<Elapsed>>() {
+ e.as_ref().classify()
} else if let Some(e) = $self.downcast_ref::<ObjectStoreError>() {
e.classify()
} else if let Some(e) = $self.downcast_ref::<Arc<ObjectStoreError>>() {
diff --git a/compactor2/src/test_util.rs b/compactor2/src/test_util.rs
index d3d5ecdbab..d07eb4c307 100644
--- a/compactor2/src/test_util.rs
+++ b/compactor2/src/test_util.rs
@@ -414,6 +414,7 @@ impl TestSetup {
max_desired_file_size_bytes: MAX_DESIRE_FILE_SIZE,
percentage_max_file_size: PERCENTAGE_MAX_FILE_SIZE,
split_percentage: SPLIT_PERCENTAGE,
+ partition_timeout_secs: 3_600,
});
Self {
diff --git a/ioxd_compactor2/src/lib.rs b/ioxd_compactor2/src/lib.rs
index 920370896e..0fe6fc4178 100644
--- a/ioxd_compactor2/src/lib.rs
+++ b/ioxd_compactor2/src/lib.rs
@@ -147,6 +147,7 @@ pub async fn create_compactor2_server_type(
compactor_config.split_percentage,
TOPIC.to_string(),
SHARD_INDEX,
+ compactor_config.partition_timeout_secs,
)
.await,
);
|
906645c5b6cd751a4496c63fe85ab3461a91eb1a
|
Dom Dwyer
|
2022-10-28 12:37:05
|
assert DmlHandler ID propagation
|
Adds an integration test asserting that the NamespaceId and Table IDs
are propagated through the router.
| null |
test(router): assert DmlHandler ID propagation
Adds an integration test asserting that the NamespaceId and Table IDs
are propagated through the router.
|
diff --git a/router/tests/http.rs b/router/tests/http.rs
index 8945c427b0..5bdc9120ff 100644
--- a/router/tests/http.rs
+++ b/router/tests/http.rs
@@ -5,6 +5,7 @@ use data_types::{
ColumnType, PartitionTemplate, QueryPoolId, ShardIndex, TableId, TemplatePart, TopicId,
};
use dml::DmlOperation;
+use futures::{stream::FuturesUnordered, StreamExt};
use hashbrown::HashMap;
use hyper::{Body, Request, StatusCode};
use iox_catalog::{interface::Catalog, mem::MemCatalog};
@@ -134,8 +135,8 @@ impl TestContext {
}
/// Get a reference to the test context's catalog.
- pub fn catalog(&self) -> &dyn Catalog {
- self.catalog.as_ref()
+ pub fn catalog(&self) -> Arc<dyn Catalog> {
+ Arc::clone(&self.catalog)
}
/// Get a reference to the test context's write buffer state.
@@ -343,3 +344,126 @@ async fn test_schema_limit() {
);
assert_eq!(err.as_status_code(), StatusCode::BAD_REQUEST);
}
+
+#[tokio::test]
+async fn test_write_propagate_ids() {
+ let ctx = TestContext::new();
+
+ // Create the namespace and a set of tables.
+ let ns = ctx
+ .catalog()
+ .repositories()
+ .await
+ .namespaces()
+ .create(
+ "bananas_test",
+ iox_catalog::INFINITE_RETENTION_POLICY,
+ TopicId::new(TEST_TOPIC_ID),
+ QueryPoolId::new(TEST_QUERY_POOL_ID),
+ )
+ .await
+ .expect("failed to update table limit");
+
+ let catalog = ctx.catalog();
+ let ids = ["another", "test", "table", "platanos"]
+ .iter()
+ .map(|t| {
+ let catalog = Arc::clone(&catalog);
+ async move {
+ let table = catalog
+ .repositories()
+ .await
+ .tables()
+ .create_or_get(t, ns.id)
+ .await
+ .unwrap();
+ (*t, table.id)
+ }
+ })
+ .collect::<FuturesUnordered<_>>()
+ .collect::<HashMap<_, _>>()
+ .await;
+
+ let request = Request::builder()
+ .uri("https://bananas.example/api/v2/write?org=bananas&bucket=test")
+ .method("POST")
+ .body(Body::from(
+ "\
+ platanos,tag1=A,tag2=B val=42i 123456\n\
+ another,tag1=A,tag2=B val=42i 123458\n\
+ test,tag1=A,tag2=B val=42i 123458\n\
+ platanos,tag1=A,tag2=B val=42i 123458\n\
+ table,tag1=A,tag2=B val=42i 123458\n\
+ ",
+ ))
+ .expect("failed to construct HTTP request");
+
+ let response = ctx
+ .delegate()
+ .route(request)
+ .await
+ .expect("LP write request failed");
+
+ assert_eq!(response.status(), StatusCode::NO_CONTENT);
+
+ // Check the write buffer observed the correct write.
+ let writes = ctx.write_buffer_state().get_messages(ShardIndex::new(0));
+ assert_eq!(writes.len(), 1);
+ assert_matches!(writes.as_slice(), [Ok(DmlOperation::Write(w))] => {
+ assert_eq!(w.namespace(), "bananas_test");
+ assert_eq!(unsafe { w.namespace_id() } , ns.id);
+ assert!(w.table("platanos").is_some());
+
+ for (name, id) in ids {
+ assert_eq!(unsafe { w.table_id(name).unwrap() }, id);
+ }
+ });
+}
+
+#[tokio::test]
+async fn test_delete_propagate_ids() {
+ let ctx = TestContext::new();
+
+ // Create the namespace and a set of tables.
+ let ns = ctx
+ .catalog()
+ .repositories()
+ .await
+ .namespaces()
+ .create(
+ "bananas_test",
+ iox_catalog::INFINITE_RETENTION_POLICY,
+ TopicId::new(TEST_TOPIC_ID),
+ QueryPoolId::new(TEST_QUERY_POOL_ID),
+ )
+ .await
+ .expect("failed to update table limit");
+
+ let request = Request::builder()
+ .uri("https://bananas.example/api/v2/delete?org=bananas&bucket=test")
+ .method("POST")
+ .body(Body::from(
+ r#"{
+ "predicate": "_measurement=bananas",
+ "start": "1970-01-01T00:00:00Z",
+ "stop": "2070-01-02T00:00:00Z"
+ }"#,
+ ))
+ .expect("failed to construct HTTP request");
+
+ let response = ctx
+ .delegate()
+ .route(request)
+ .await
+ .expect("delete request failed");
+
+ assert_eq!(response.status(), StatusCode::NO_CONTENT);
+
+ // Check the write buffer observed the correct write.
+ let writes = ctx.write_buffer_state().get_messages(ShardIndex::new(0));
+ assert_eq!(writes.len(), 1);
+ assert_matches!(writes.as_slice(), [Ok(DmlOperation::Delete(w))] => {
+ assert_eq!(w.namespace(), "bananas_test");
+ assert_eq!(unsafe { w.namespace_id() } , ns.id);
+ });
+}
|
4a8bb871dcd8c4d8c45d11622abb6cbd2578efb9
|
Marco Neumann
|
2022-12-01 12:51:47
|
revert stream-based `SeriesSetConvert::convert` interface (#6282)
|
This reverts commit dad6dee924ef71b414e4fc3b79864e454f4f7fea.
| null |
refactor: revert stream-based `SeriesSetConvert::convert` interface (#6282)
This reverts commit dad6dee924ef71b414e4fc3b79864e454f4f7fea.
|
diff --git a/iox_query/src/exec/context.rs b/iox_query/src/exec/context.rs
index 3dd8925ed1..d37cd724c0 100644
--- a/iox_query/src/exec/context.rs
+++ b/iox_query/src/exec/context.rs
@@ -451,9 +451,17 @@ impl IOxSessionContext {
let it = ctx.execute_stream(physical_plan).await?;
- SeriesSetConverter::default()
+ let series_sets = SeriesSetConverter::default()
.convert(table_name, tag_columns, field_columns, it)
.await
+ .map_err(|e| {
+ Error::Execution(format!(
+ "Error executing series set conversion: {}",
+ e
+ ))
+ })?;
+
+ Ok(futures::stream::iter(series_sets).map(|x| Ok(x) as Result<_>))
})
})
.try_flatten()
diff --git a/iox_query/src/exec/seriesset/converter.rs b/iox_query/src/exec/seriesset/converter.rs
index 7ea4a1ca81..4de278b54d 100644
--- a/iox_query/src/exec/seriesset/converter.rs
+++ b/iox_query/src/exec/seriesset/converter.rs
@@ -15,8 +15,9 @@ use datafusion::{
};
use futures::{Stream, StreamExt, TryStreamExt};
-use snafu::{OptionExt, Snafu};
+use snafu::{OptionExt, ResultExt, Snafu};
use std::sync::Arc;
+use tokio::sync::mpsc::error::SendError;
use crate::exec::{
field::{self, FieldColumns, FieldIndexes},
@@ -30,11 +31,40 @@ use super::{
#[derive(Debug, Snafu)]
pub enum Error {
+ #[snafu(display("Plan Execution Error: {}", source))]
+ Execution {
+ source: Box<dyn std::error::Error + Send + Sync + 'static>,
+ },
+
+ #[snafu(display(
+ "Error reading record batch while converting from SeriesSet: {:?}",
+ source
+ ))]
+ Reading {
+ source: datafusion::error::DataFusionError,
+ },
+
+ #[snafu(display(
+ "Error concatenating record batch while converting from SeriesSet: {:?}",
+ source
+ ))]
+ Concatenating { source: arrow::error::ArrowError },
+
#[snafu(display("Internal field error while converting series set: {}", source))]
InternalField { source: field::Error },
#[snafu(display("Internal error finding grouping colum: {}", column_name))]
FindingGroupColumn { column_name: String },
+
+ #[snafu(display("Sending series set results during conversion: {:?}", source))]
+ SendingDuringConversion {
+ source: Box<SendError<Result<SeriesSet>>>,
+ },
+
+ #[snafu(display("Sending grouped series set results during conversion: {:?}", source))]
+ SendingDuringGroupedConversion {
+ source: Box<SendError<Result<SeriesSet>>>,
+ },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -63,54 +93,26 @@ impl SeriesSetConverter {
tag_columns: Arc<Vec<Arc<str>>>,
field_columns: FieldColumns,
it: SendableRecordBatchStream,
- ) -> Result<impl Stream<Item = Result<SeriesSet, DataFusionError>>, DataFusionError> {
- assert_eq!(
- tag_columns.as_ref(),
- &{
- let mut tmp = tag_columns.as_ref().clone();
- tmp.sort();
- tmp
- },
- "Tag column sorted",
- );
-
- let schema = it.schema();
-
+ ) -> Result<Vec<SeriesSet>, Error> {
// for now, this logic only handles a single `RecordBatch` so
// concat data together.
//
// proper streaming support tracked by:
// https://github.com/influxdata/influxdb_iox/issues/4445
- let batches = collect(it).await.map_err(|e| {
- DataFusionError::Context(
- "Error reading record batch while converting from SeriesSet".to_string(),
- Box::new(e),
- )
- })?;
-
- let batch = compute::concat_batches(&schema, &batches).map_err(|e| {
- DataFusionError::Context(
- "Error concatenating record batch while converting from SeriesSet".to_string(),
- Box::new(DataFusionError::ArrowError(e)),
- )
- })?;
- if batch.num_rows() == 0 {
- return Ok(futures::stream::empty().boxed());
- }
+ let batches = collect(it).await.context(ReadingSnafu)?;
- let tag_indexes = FieldIndexes::names_to_indexes(&schema, &tag_columns).map_err(|e| {
- DataFusionError::Context(
- "Internal field error while converting series set".to_string(),
- Box::new(DataFusionError::External(Box::new(e))),
- )
- })?;
- let field_indexes =
- FieldIndexes::from_field_columns(&schema, &field_columns).map_err(|e| {
- DataFusionError::Context(
- "Internal field error while converting series set".to_string(),
- Box::new(DataFusionError::External(Box::new(e))),
- )
- })?;
+ let batch = if !batches.is_empty() {
+ compute::concat_batches(&batches[0].schema(), &batches).context(ConcatenatingSnafu)?
+ } else {
+ return Ok(vec![]);
+ };
+
+ let schema = batch.schema();
+ // TODO: check that the tag columns are sorted by tag name...
+ let tag_indexes =
+ FieldIndexes::names_to_indexes(&schema, &tag_columns).context(InternalFieldSnafu)?;
+ let field_indexes = FieldIndexes::from_field_columns(&schema, &field_columns)
+ .context(InternalFieldSnafu)?;
// Algorithm: compute, via bitsets, the rows at which each
// tag column changes and thereby where the tagset
@@ -146,7 +148,7 @@ impl SeriesSetConverter {
// call await during the loop)
// emit each series
- let series_sets: Vec<_> = intersections
+ let series_sets = intersections
.into_iter()
.map(|end_row| {
let series_set = SeriesSet {
@@ -161,7 +163,7 @@ impl SeriesSetConverter {
})
.collect();
- Ok(futures::stream::iter(series_sets).map(Ok).boxed())
+ Ok(series_sets)
}
/// returns a bitset with all row indexes where the value of the
@@ -775,9 +777,6 @@ mod tests {
.convert(table_name, tag_columns, field_columns, it)
.await
.expect("Conversion happened without error")
- .try_collect()
- .await
- .expect("Conversion happened without error")
}
/// Test helper: parses the csv content into a single record batch arrow
|
fbfbe1adb49b8ce666dc7726808b3e6052f256d7
|
Carol (Nichols || Goulding)
|
2023-01-30 17:16:14
|
Remove track_caller from async fns as it's a no-op
|
Rust 1.67 now says:
|
warning: `#[track_caller]` on async functions is a no-op
= note: see issue #87417 <https://github.com/rust-lang/rust/issues/87417> for more information
= note: `#[warn(ungated_async_fn_track_caller)]` on by default
|
fix: Remove track_caller from async fns as it's a no-op
Rust 1.67 now says:
warning: `#[track_caller]` on async functions is a no-op
= note: see issue #87417 <https://github.com/rust-lang/rust/issues/87417> for more information
= note: `#[warn(ungated_async_fn_track_caller)]` on by default
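To make the warning concrete, here is a minimal, hypothetical sketch (not code from this repository): on a plain fn the attribute changes the reported panic location, while on an async fn it is currently ignored, which is exactly what the lint above reports and why the diff below removes it.

/// On a synchronous function, #[track_caller] makes the panic location
/// (std::panic::Location::caller()) point at the call site.
#[track_caller]
fn require(value: Option<i32>) -> i32 {
    value.expect("value must be set")
}

/// On an async fn the attribute is a no-op, so Rust 1.67 emits the
/// `ungated_async_fn_track_caller` warning shown above when compiling this.
#[track_caller]
async fn require_async(value: Option<i32>) -> i32 {
    value.expect("value must be set")
}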
|
diff --git a/compactor2/src/components/scratchpad/test_util.rs b/compactor2/src/components/scratchpad/test_util.rs
index 0c9dc893d6..9362c07580 100644
--- a/compactor2/src/components/scratchpad/test_util.rs
+++ b/compactor2/src/components/scratchpad/test_util.rs
@@ -29,7 +29,6 @@ pub fn file_path(i: u128) -> ParquetFilePath {
)
}
-#[track_caller]
pub async fn assert_content<const N: usize>(
store: &Arc<DynObjectStore>,
files: [&ParquetFilePath; N],
diff --git a/ingester/tests/common/mod.rs b/ingester/tests/common/mod.rs
index 876354144f..8ae20ee581 100644
--- a/ingester/tests/common/mod.rs
+++ b/ingester/tests/common/mod.rs
@@ -174,7 +174,6 @@ impl TestContext {
/// # Panics
///
/// Must not be called twice with the same `name`.
- #[track_caller]
pub async fn ensure_namespace(
&mut self,
name: &str,
@@ -211,7 +210,6 @@ impl TestContext {
}
/// A helper wrapper over [`Self::enqueue_write()`] for line-protocol.
- #[track_caller]
pub async fn write_lp(
&mut self,
namespace: &str,
|
76ed1afb01c88521b3f00b6fdeffce8057332f3e
|
Dom Dwyer
|
2022-11-09 18:53:01
|
support prefetch deferred loads
|
Allow a caller to signal to the DeferredLoad that the value it may or
may not have to materialise will be used imminently, optimistically
hiding the latency of resolving the value (typically a catalog query).
| null |
perf(ingester): support prefetch deferred loads
Allow a caller to signal to the DeferredLoad that the value it may or
may not have to materialise will be used imminently, optimistically
hiding the latency of resolving the value (typically a catalog query).
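A rough usage sketch of the idea (a simplified analogue, not the ingester's `DeferredLoad`, which resolves lazily on demand unless given the prefetch hint): start resolving the value in the background, overlap it with other work, then await it when it is actually needed.

use std::time::Duration;
use tokio::task::JoinHandle;

/// Simplified analogue of a deferred load: `prefetch` starts the (possibly
/// slow) resolution immediately, `get` awaits the result later.
struct Deferred<T> {
    handle: JoinHandle<T>,
}

impl<T: Send + 'static> Deferred<T> {
    fn prefetch<F>(load: F) -> Self
    where
        F: std::future::Future<Output = T> + Send + 'static,
    {
        Self {
            handle: tokio::spawn(load),
        }
    }

    async fn get(self) -> T {
        self.handle.await.expect("load task panicked")
    }
}

#[tokio::main]
async fn main() {
    // Kick off the simulated "catalog query" before the value is needed...
    let key = Deferred::prefetch(async {
        tokio::time::sleep(Duration::from_millis(50)).await;
        "partition_key".to_string()
    });

    // ...overlap it with unrelated work...
    tokio::time::sleep(Duration::from_millis(50)).await;

    // ...so this await is (mostly) free by the time we get here.
    println!("resolved: {}", key.get().await);
}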
|
diff --git a/ingester/src/deferred_load.rs b/ingester/src/deferred_load.rs
index 539eaefa0a..d5aba7624e 100644
--- a/ingester/src/deferred_load.rs
+++ b/ingester/src/deferred_load.rs
@@ -68,6 +68,73 @@ where
}
}
+impl<T> DeferredLoad<T> {
+ /// Provide a hint to the [`DeferredLoad`] that the value will be used soon.
+ ///
+ /// This allows the value to be materialised in the background, in parallel
+ /// while the caller is executing code that will eventually call
+ /// [`Self::get()`].
+ pub(crate) fn prefetch_now(&self) {
+ let mut state = self.value.lock();
+
+ // If the value has already resolved, this call is a NOP.
+ if let Some(State::Resolved(_)) = &*state {
+ return;
+ }
+
+ // Potentially transition the state, discarding the waker.
+ let (_, new_state) = self.get_load_waker(state.take().unwrap());
+ *state = Some(new_state);
+ }
+
+ /// Potentially transition `state`, returning the new state and the
+ /// [`Notify`] that will be signalled when loading the value completes.
+ ///
+ /// # Panics
+ ///
+ /// This method panics if `state` is [`State::Resolved`].
+ fn get_load_waker(&self, state: State<T>) -> (Arc<Notify>, State<T>) {
+ let waker = match state {
+ // This caller is the first to demand the value - wake the
+ // background task, initialise the notification mechanism and
+ // wait for the task to complete.
+ State::Unresolved(task_waker) => {
+ // Wake the running background task, ignoring any send error
+ // as the background task may have concurrently woken up due
+ // to the sleep timer and stopped listening on the waker
+ // channel.
+ let _ = task_waker.send(());
+
+ // Replace the state with a notification for this thread
+ // (and others that call get()) to wait on for the
+ // concurrent fetch to complete.
+ Arc::new(Notify::default())
+ }
+
+ // If the value is already being fetched, wait for the fetch to
+ // complete.
+ State::Loading(waker) => waker,
+
+ // This was checked above before take()-ing the state.
+ State::Resolved(_) => unreachable!(),
+ };
+
+ // Ensure any subsequent callers can subscribe to the completion
+ // event by transitioning to the loading state.
+ let state = State::Loading(Arc::clone(&waker));
+
+ // Whenever there is a waker for the caller, the background task
+ // MUST be running.
+ //
+ // This check happens before the state lock is released, ensuring
+ // the background task doesn't concurrently finish (it would be
+ // blocked waiting to update the state).
+ assert!(!self.handle.is_finished());
+
+ (waker, state)
+ }
+}
+
impl<T> DeferredLoad<T>
where
T: Send + Sync + 'static,
@@ -179,42 +246,8 @@ where
// If execution reaches here, this call will have to wait for the
// value to be resolved, and potentially must wake the background
// task to do so.
- let waker = match state.take().unwrap() {
- // This caller is the first to demand the value - wake the
- // background task, initialise the notification mechanism and
- // wait for the task to complete.
- State::Unresolved(task_waker) => {
- // Wake the running background task, ignoring any send error
- // as the background task may have concurrently woken up due
- // to the sleep timer and stopped listening on the waker
- // channel.
- let _ = task_waker.send(());
-
- // Replace the state with a notification for this thread
- // (and others that call get()) to wait on for the
- // concurrent fetch to complete.
- Arc::new(Notify::default())
- }
-
- // If the value is already being fetched, wait for the fetch to
- // complete.
- State::Loading(waker) => waker,
-
- // This was checked above before take()-ing the state.
- State::Resolved(_) => unreachable!(),
- };
-
- // Ensure any subsequent callers can subscribe to the completion
- // event.
- *state = Some(State::Loading(Arc::clone(&waker)));
-
- // Whenever there is a waker for the caller, the background task
- // MUST be running.
- //
- // This check happens before the state lock is released, ensuring
- // the background task doesn't concurrently finish (it would be
- // blocked waiting to update the state).
- assert!(!self.handle.is_finished());
+ let (waker, new_state) = self.get_load_waker(state.take().unwrap());
+ *state = Some(new_state);
waker
};
@@ -371,4 +404,56 @@ mod tests {
fut.as_mut().with_timeout_panic(TIMEOUT).await;
assert_eq!(fut.as_mut().take_output(), Some(42));
}
+
+ #[tokio::test]
+ async fn test_prefetch_concurrent_demand() {
+ // This channel is used to signal the background load has begun.
+ let (signal_start, started) = oneshot::channel();
+
+ // This channel is used to block the background task from completing
+ // after the above channel has signalled it has begun.
+ let (allow_complete, can_complete) = oneshot::channel();
+
+ // Configure the background load to fire (practically) immediately but
+ // block waiting for rx to be unblocked.
+ //
+ // This allows the current thread time to issue a demand and wait on the
+ // result before the background load completes.
+ let d = Arc::new(DeferredLoad::new(LONG_LONG_TIME, async {
+ // Signal the background task has begun.
+ signal_start.send(()).expect("test task died");
+ // Wait for the test thread to issue the demand call and unblock
+ // this fn.
+ can_complete.await.expect("sender died");
+ 42
+ }));
+
+ d.prefetch_now();
+
+ // Wait for the background task to begin.
+ started
+ .with_timeout_panic(Duration::from_secs(5))
+ .await
+ .expect("background task died");
+
+ // Issue a demand call
+ let fut = future::maybe_done(d.get());
+ pin_mut!(fut);
+ assert_eq!(fut.as_mut().take_output(), None);
+
+ // Unblock the background task.
+ allow_complete.send(()).expect("background task died");
+
+ // And await the demand call
+ fut.as_mut().await;
+ assert_eq!(fut.as_mut().take_output(), Some(42));
+ }
+
+ #[tokio::test]
+ async fn test_prefetch_already_loaded() {
+ let d = Arc::new(DeferredLoad::new(LONG_LONG_TIME, async { 42 }));
+
+ let _ = d.get().with_timeout_panic(TIMEOUT).await;
+ d.prefetch_now();
+ }
}
|
1e5d3f31af6e54faaa9b8b2402db2fa26317e860
|
Dom Dwyer
|
2022-11-11 18:36:20
|
clearer code comments / docs
|
Remove redundant comments & clarify returns.
| null |
docs: clearer code comments / docs
Remove redundant comments & clarify returns.
|
diff --git a/ingester/src/data/shard.rs b/ingester/src/data/shard.rs
index 02ca70ea04..184af03da4 100644
--- a/ingester/src/data/shard.rs
+++ b/ingester/src/data/shard.rs
@@ -192,7 +192,6 @@ mod tests {
.await
.expect("buffer op should succeed");
- // Both forms of referencing the table should succeed
assert!(shard.namespace(NAMESPACE_ID).is_some());
// And the table counter metric should increase
diff --git a/ingester/src/deferred_load.rs b/ingester/src/deferred_load.rs
index 8549110a45..45526a36ae 100644
--- a/ingester/src/deferred_load.rs
+++ b/ingester/src/deferred_load.rs
@@ -104,8 +104,9 @@ impl<T> DeferredLoad<T> {
*state = Some(new_state);
}
- /// Potentially transition `state`, returning the new state and the
- /// [`Notify`] that will be signalled when loading the value completes.
+ /// Potentially transition `state`, returning the [`Notify`] that will be
+ /// signalled when loading the value completes, and the (potentially
+ /// changed) state.
///
/// # Panics
///
|
e0ecacf6cc5d51b26eaee7d212879355ba3dd7c8
|
Andrew Lamb
|
2022-12-12 07:14:00
|
Update DataFusion (get median fix and automatic string to timestamp coercion) (#6363)
|
* chore: Update DataFusion pin to get median fix
* chore: Update for new Expr node
* test: add test for median
* test: add test for coercion of strings to timestamps
* chore: Run cargo hakari tasks
|
Co-authored-by: CircleCI[bot] <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
chore: Update DataFusion (get median fix and automatic string to timestamp coercion) (#6363)
* chore: Update DataFusion pin to get median fix
* chore: Update for new Expr node
* test: add test for median
* test: add test for coercion of strings to timestamps
* chore: Run cargo hakari tasks
Co-authored-by: CircleCI[bot] <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/Cargo.lock b/Cargo.lock
index 24965fe480..14276c503c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1244,7 +1244,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "15.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=fbadebb894672f61327a30f77cda2ee88a343b2a#fbadebb894672f61327a30f77cda2ee88a343b2a"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=f2eb3b2bebffe75df06f3e55f2413728e7e19f2c#f2eb3b2bebffe75df06f3e55f2413728e7e19f2c"
dependencies = [
"ahash 0.8.2",
"arrow",
@@ -1276,7 +1276,6 @@ dependencies = [
"pin-project-lite",
"rand",
"smallvec",
- "sqllogictest",
"sqlparser 0.27.0",
"tempfile",
"tokio",
@@ -1290,7 +1289,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "15.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=fbadebb894672f61327a30f77cda2ee88a343b2a#fbadebb894672f61327a30f77cda2ee88a343b2a"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=f2eb3b2bebffe75df06f3e55f2413728e7e19f2c#f2eb3b2bebffe75df06f3e55f2413728e7e19f2c"
dependencies = [
"arrow",
"chrono",
@@ -1302,7 +1301,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "15.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=fbadebb894672f61327a30f77cda2ee88a343b2a#fbadebb894672f61327a30f77cda2ee88a343b2a"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=f2eb3b2bebffe75df06f3e55f2413728e7e19f2c#f2eb3b2bebffe75df06f3e55f2413728e7e19f2c"
dependencies = [
"ahash 0.8.2",
"arrow",
@@ -1314,7 +1313,7 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
version = "15.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=fbadebb894672f61327a30f77cda2ee88a343b2a#fbadebb894672f61327a30f77cda2ee88a343b2a"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=f2eb3b2bebffe75df06f3e55f2413728e7e19f2c#f2eb3b2bebffe75df06f3e55f2413728e7e19f2c"
dependencies = [
"arrow",
"async-trait",
@@ -1329,7 +1328,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "15.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=fbadebb894672f61327a30f77cda2ee88a343b2a#fbadebb894672f61327a30f77cda2ee88a343b2a"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=f2eb3b2bebffe75df06f3e55f2413728e7e19f2c#f2eb3b2bebffe75df06f3e55f2413728e7e19f2c"
dependencies = [
"ahash 0.8.2",
"arrow",
@@ -1358,7 +1357,7 @@ dependencies = [
[[package]]
name = "datafusion-proto"
version = "15.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=fbadebb894672f61327a30f77cda2ee88a343b2a#fbadebb894672f61327a30f77cda2ee88a343b2a"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=f2eb3b2bebffe75df06f3e55f2413728e7e19f2c#f2eb3b2bebffe75df06f3e55f2413728e7e19f2c"
dependencies = [
"arrow",
"chrono",
@@ -1375,7 +1374,7 @@ dependencies = [
[[package]]
name = "datafusion-row"
version = "15.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=fbadebb894672f61327a30f77cda2ee88a343b2a#fbadebb894672f61327a30f77cda2ee88a343b2a"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=f2eb3b2bebffe75df06f3e55f2413728e7e19f2c#f2eb3b2bebffe75df06f3e55f2413728e7e19f2c"
dependencies = [
"arrow",
"datafusion-common",
@@ -1386,11 +1385,12 @@ dependencies = [
[[package]]
name = "datafusion-sql"
version = "15.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=fbadebb894672f61327a30f77cda2ee88a343b2a#fbadebb894672f61327a30f77cda2ee88a343b2a"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=f2eb3b2bebffe75df06f3e55f2413728e7e19f2c#f2eb3b2bebffe75df06f3e55f2413728e7e19f2c"
dependencies = [
"arrow-schema",
"datafusion-common",
"datafusion-expr",
+ "log",
"sqlparser 0.27.0",
]
@@ -1424,12 +1424,6 @@ version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"
-[[package]]
-name = "difference"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198"
-
[[package]]
name = "difflib"
version = "0.4.0"
@@ -3030,17 +3024,6 @@ version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb"
-[[package]]
-name = "libtest-mimic"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7b603516767d1ab23d0de09d023e62966c3322f7148297c35cf3d97aa8b37fa"
-dependencies = [
- "clap 4.0.29",
- "termcolor",
- "threadpool",
-]
-
[[package]]
name = "link-cplusplus"
version = "1.0.7"
@@ -5099,25 +5082,6 @@ dependencies = [
"unicode_categories",
]
-[[package]]
-name = "sqllogictest"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba41e01d229d7725401de371e323851f82d839d68732a06162405362b60852fe"
-dependencies = [
- "async-trait",
- "difference",
- "futures",
- "glob",
- "humantime",
- "itertools",
- "libtest-mimic",
- "regex",
- "tempfile",
- "thiserror",
- "tracing",
-]
-
[[package]]
name = "sqlparser"
version = "0.27.0"
@@ -6371,7 +6335,6 @@ dependencies = [
"bytes",
"cc",
"chrono",
- "clap 4.0.29",
"crossbeam-utils",
"crypto-common",
"datafusion",
@@ -6391,6 +6354,7 @@ dependencies = [
"hashbrown 0.13.1",
"heck",
"indexmap",
+ "io-lifetimes",
"libc",
"lock_api",
"log",
diff --git a/Cargo.toml b/Cargo.toml
index 15e5da6ff5..c9b342ca8b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -114,8 +114,8 @@ license = "MIT OR Apache-2.0"
[workspace.dependencies]
arrow = { version = "28.0.0" }
arrow-flight = { version = "28.0.0" }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="fbadebb894672f61327a30f77cda2ee88a343b2a", default-features = false }
-datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="fbadebb894672f61327a30f77cda2ee88a343b2a" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="f2eb3b2bebffe75df06f3e55f2413728e7e19f2c", default-features = false }
+datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="f2eb3b2bebffe75df06f3e55f2413728e7e19f2c" }
hashbrown = { version = "0.13.1" }
parquet = { version = "28.0.0" }
diff --git a/predicate/src/lib.rs b/predicate/src/lib.rs
index 4940a86296..519507050d 100644
--- a/predicate/src/lib.rs
+++ b/predicate/src/lib.rs
@@ -635,6 +635,7 @@ impl ExpressionVisitor for RowBasedVisitor {
| Expr::Literal(_)
| Expr::Negative(_)
| Expr::Not(_)
+ | Expr::Placeholder { .. }
| Expr::QualifiedWildcard { .. }
| Expr::ScalarFunction { .. }
| Expr::ScalarSubquery(_)
diff --git a/query_tests/cases/in/basic.expected b/query_tests/cases/in/basic.expected
index ceb093cb76..ae7d145702 100644
--- a/query_tests/cases/in/basic.expected
+++ b/query_tests/cases/in/basic.expected
@@ -25,6 +25,20 @@
+--------+--------------------------------+------+
| west | 1970-01-01T00:00:00.000000150Z | 21 |
+--------+--------------------------------+------+
+-- SQL: SELECT * from cpu where time > '1970-01-01T00:00:00'::timestamp ORDER BY time;
++--------+--------------------------------+------+
+| region | time | user |
++--------+--------------------------------+------+
+| west | 1970-01-01T00:00:00.000000100Z | 23.2 |
+| west | 1970-01-01T00:00:00.000000150Z | 21 |
++--------+--------------------------------+------+
+-- SQL: SELECT * from cpu where time > '1970-01-01T00:00:00' ORDER BY time;
++--------+--------------------------------+------+
+| region | time | user |
++--------+--------------------------------+------+
+| west | 1970-01-01T00:00:00.000000100Z | 23.2 |
+| west | 1970-01-01T00:00:00.000000150Z | 21 |
++--------+--------------------------------+------+
-- SQL: SELECT "user", region from cpu where time > to_timestamp('1970-01-01T00:00:00.000000120+00:00');
+------+--------+
| user | region |
@@ -43,3 +57,9 @@
+-------+--------+--------------------------------+
| 99 | east | 1970-01-01T00:00:00.000000200Z |
+-------+--------+--------------------------------+
+-- SQL: select MEDIAN("user"), region from cpu group by region;
++------------------+--------+
+| MEDIAN(cpu.user) | region |
++------------------+--------+
+| 22.1 | west |
++------------------+--------+
diff --git a/query_tests/cases/in/basic.sql b/query_tests/cases/in/basic.sql
index a307231a48..5c9b2fada2 100644
--- a/query_tests/cases/in/basic.sql
+++ b/query_tests/cases/in/basic.sql
@@ -13,6 +13,12 @@ SELECT "user", region from cpu;
-- predicate on CPU
SELECT * from cpu where time > to_timestamp('1970-01-01T00:00:00.000000120+00:00');
+-- predicate on CPU with explicit coercion (cast string to timestamp)
+SELECT * from cpu where time > '1970-01-01T00:00:00'::timestamp ORDER BY time;
+
+-- predicate on CPU with automatic coercion (comparing time to string)
+SELECT * from cpu where time > '1970-01-01T00:00:00' ORDER BY time;
+
-- projection and predicate
-- expect that to get a subset of the columns and in the order specified
SELECT "user", region from cpu where time > to_timestamp('1970-01-01T00:00:00.000000120+00:00');
@@ -20,6 +26,8 @@ SELECT "user", region from cpu where time > to_timestamp('1970-01-01T00:00:00.00
-- basic grouping
SELECT count(*) from cpu group by region;
-
-- select from a different measurement
SELECT * from disk;
+
+-- MEDIAN should work
+select MEDIAN("user"), region from cpu group by region;
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index ea2307dc81..340127816b 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -23,10 +23,9 @@ bitflags = { version = "1" }
byteorder = { version = "1", features = ["std"] }
bytes = { version = "1", features = ["std"] }
chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "iana-time-zone", "serde", "std", "winapi"] }
-clap = { version = "4", features = ["color", "derive", "env", "error-context", "help", "std", "suggestions", "usage"] }
crossbeam-utils = { version = "0.8", features = ["std"] }
crypto-common = { version = "0.1", default-features = false, features = ["std"] }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fbadebb894672f61327a30f77cda2ee88a343b2a", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "f2eb3b2bebffe75df06f3e55f2413728e7e19f2c", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] }
digest = { version = "0.10", features = ["alloc", "block-buffer", "core-api", "mac", "std", "subtle"] }
either = { version = "1", features = ["use_std"] }
fixedbitset = { version = "0.4", features = ["std"] }
@@ -137,6 +136,7 @@ url = { version = "2" }
uuid = { version = "1", features = ["getrandom", "rng", "std", "v4"] }
[target.x86_64-unknown-linux-gnu.dependencies]
+io-lifetimes = { version = "1", features = ["close", "libc", "windows-sys"] }
once_cell = { version = "1", default-features = false, features = ["unstable"] }
rustix = { version = "0.36", features = ["fs", "io-lifetimes", "libc", "std", "termios", "use-libc-auxv"] }
@@ -144,6 +144,7 @@ rustix = { version = "0.36", features = ["fs", "io-lifetimes", "libc", "std", "t
once_cell = { version = "1", default-features = false, features = ["unstable"] }
[target.x86_64-apple-darwin.dependencies]
+io-lifetimes = { version = "1", features = ["close", "libc", "windows-sys"] }
once_cell = { version = "1", default-features = false, features = ["unstable"] }
rustix = { version = "0.36", features = ["fs", "io-lifetimes", "libc", "std", "termios", "use-libc-auxv"] }
@@ -151,6 +152,7 @@ rustix = { version = "0.36", features = ["fs", "io-lifetimes", "libc", "std", "t
once_cell = { version = "1", default-features = false, features = ["unstable"] }
[target.aarch64-apple-darwin.dependencies]
+io-lifetimes = { version = "1", features = ["close", "libc", "windows-sys"] }
once_cell = { version = "1", default-features = false, features = ["unstable"] }
rustix = { version = "0.36", features = ["fs", "io-lifetimes", "libc", "std", "termios", "use-libc-auxv"] }
|
e4179605dfec25319c82d681775029ff90452476
|
Dom Dwyer
|
2022-10-14 14:46:34
|
assert default service limit values
|
Adds tests that assert the default service limit values.
| null |
test: assert default service limit values
Adds tests that assert the default service limit values.
|
diff --git a/iox_catalog/src/interface.rs b/iox_catalog/src/interface.rs
index 8d78d33cfc..d76698ba2b 100644
--- a/iox_catalog/src/interface.rs
+++ b/iox_catalog/src/interface.rs
@@ -864,7 +864,7 @@ pub async fn list_schemas(
#[cfg(test)]
pub(crate) mod test_helpers {
- use crate::validate_or_insert_schema;
+ use crate::{validate_or_insert_schema, DEFAULT_MAX_COLUMNS_PER_TABLE, DEFAULT_MAX_TABLES};
use super::*;
use ::test_helpers::{assert_contains, tracing::TracingCapture};
@@ -956,6 +956,13 @@ pub(crate) mod test_helpers {
assert!(namespace.id > NamespaceId::new(0));
assert_eq!(namespace.name, namespace_name);
+ // Assert default values for service protection limits.
+ assert_eq!(namespace.max_tables, DEFAULT_MAX_TABLES);
+ assert_eq!(
+ namespace.max_columns_per_table,
+ DEFAULT_MAX_COLUMNS_PER_TABLE
+ );
+
let conflict = repos
.namespaces()
.create(namespace_name, "inf", topic.id, pool.id)
@@ -4021,7 +4028,12 @@ pub(crate) mod test_helpers {
let batches = mutable_batch_lp::lines_to_batches(lines, 42).unwrap();
let batches = batches.iter().map(|(table, batch)| (table.as_str(), batch));
- let ns = NamespaceSchema::new(namespace.id, topic.id, pool.id, 1000);
+ let ns = NamespaceSchema::new(
+ namespace.id,
+ topic.id,
+ pool.id,
+ namespace.max_columns_per_table,
+ );
let schema = validate_or_insert_schema(batches, &ns, repos)
.await
diff --git a/iox_catalog/src/lib.rs b/iox_catalog/src/lib.rs
index 13e595a46b..6986c2a585 100644
--- a/iox_catalog/src/lib.rs
+++ b/iox_catalog/src/lib.rs
@@ -27,6 +27,11 @@ const SHARED_TOPIC_NAME: &str = "iox-shared";
const SHARED_QUERY_POOL: &str = SHARED_TOPIC_NAME;
const TIME_COLUMN: &str = "time";
+/// Default per-namespace table count service protection limit.
+const DEFAULT_MAX_TABLES: i32 = 1000;
+/// Default per-table column count service protection limit.
+const DEFAULT_MAX_COLUMNS_PER_TABLE: i32 = 200;
+
/// A string value representing an infinite retention policy.
pub const INFINITE_RETENTION_POLICY: &str = "inf";
diff --git a/iox_catalog/src/mem.rs b/iox_catalog/src/mem.rs
index 45be0e8838..bdba0ccd6b 100644
--- a/iox_catalog/src/mem.rs
+++ b/iox_catalog/src/mem.rs
@@ -9,6 +9,7 @@ use crate::{
TombstoneRepo, TopicMetadataRepo, Transaction,
},
metrics::MetricDecorator,
+ DEFAULT_MAX_COLUMNS_PER_TABLE, DEFAULT_MAX_TABLES,
};
use async_trait::async_trait;
use data_types::{
@@ -302,8 +303,8 @@ impl NamespaceRepo for MemTxn {
topic_id,
query_pool_id,
retention_duration: Some(retention_duration.to_string()),
- max_tables: 10000,
- max_columns_per_table: 1000,
+ max_tables: DEFAULT_MAX_TABLES,
+ max_columns_per_table: DEFAULT_MAX_COLUMNS_PER_TABLE,
};
stage.namespaces.push(namespace);
Ok(stage.namespaces.last().unwrap().clone())
diff --git a/iox_catalog/src/postgres.rs b/iox_catalog/src/postgres.rs
index 7561303d0e..2c02e2fb45 100644
--- a/iox_catalog/src/postgres.rs
+++ b/iox_catalog/src/postgres.rs
@@ -8,6 +8,7 @@ use crate::{
TombstoneRepo, TopicMetadataRepo, Transaction,
},
metrics::MetricDecorator,
+ DEFAULT_MAX_COLUMNS_PER_TABLE, DEFAULT_MAX_TABLES,
};
use async_trait::async_trait;
use data_types::{
@@ -618,6 +619,10 @@ RETURNING *;
}
})?;
+ // Ensure the column default values match the code values.
+ debug_assert_eq!(rec.max_tables, DEFAULT_MAX_TABLES);
+ debug_assert_eq!(rec.max_columns_per_table, DEFAULT_MAX_COLUMNS_PER_TABLE);
+
Ok(rec)
}
|
09fe26841941184405b519a8a89b6614a9e10f74
|
Trevor Hilton
|
2024-05-06 15:21:18
|
clean up heappy, pprof, and jemalloc (#24967)
|
* chore: clean up heappy, pprof, and jemalloc
Set up jemalloc as the default allocator using the tikv-jemallocator
crate instead of tikv-jemalloc-sys.
Removed heappy and pprof, and also cleaned up all the mutually exclusive
compiler flags for using heappy as the allocator.
* chore: remove heappy from ci
| null |
chore: clean up heappy, pprof, and jemalloc (#24967)
* chore: clean up heappy, pprof, and jemalloc
Set up jemalloc as the default allocator using the tikv-jemallocator
crate instead of tikv-jemalloc-sys.
Removed heappy and pprof, and also cleaned up all the mutually exclusive
compiler flags for using heappy as the allocator.
* chore: remove heappy from ci
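For reference, the usual documented way to install tikv-jemallocator as the process-wide allocator is a `#[global_allocator]` static; this is a generic sketch of that pattern, not necessarily the exact cfg gating used in this commit.

// Use jemalloc instead of the system allocator for the whole binary.
// (Commonly gated off on MSVC targets, where jemalloc is unsupported.)
#[cfg(not(target_env = "msvc"))]
use tikv_jemallocator::Jemalloc;

#[cfg(not(target_env = "msvc"))]
#[global_allocator]
static GLOBAL: Jemalloc = Jemalloc;

fn main() {
    // All heap allocations below now go through jemalloc.
    let v: Vec<u8> = Vec::with_capacity(1024);
    println!("allocated {} bytes up front", v.capacity());
}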
|
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 3fe754851e..15e836c928 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -191,27 +191,6 @@ jobs:
name: cargo test --workspace
command: cargo test --workspace
- # end to end tests with Heappy (heap profiling enabled)
- test-heappy:
- docker:
- - image: quay.io/influxdb/rust:ci
- resource_class: xlarge # use of a smaller executor tends crashes on link
- environment:
- # Disable incremental compilation to avoid overhead. We are not preserving these files anyway.
- CARGO_INCREMENTAL: "0"
- # Disable full debug symbol generation to speed up CI build
- # "1" means line tables only, which is useful for panic tracebacks.
- CARGO_PROFILE_DEV_DEBUG: "1"
- # https://github.com/rust-lang/cargo/issues/10280
- CARGO_NET_GIT_FETCH_WITH_CLI: "true"
- RUST_BACKTRACE: "1"
- steps:
- - checkout
- - rust_components
- - run:
- name: cargo test --no-default-features --features=heappy --workspace
- command: cargo test --no-default-features --features=heappy --workspace
-
# Build a dev binary.
#
# Compiles a binary with the default ("dev") cargo profile from the influxdb3 source
@@ -257,7 +236,7 @@ jobs:
command: target-env cargo check --target=<< parameters.target >> --workspace --benches
- run:
name: Check extra features (like prod image)
- command: target-env cargo check --target=<< parameters.target >> --no-default-features --features="aws,gcp,azure,jemalloc_replacing_malloc,tokio_console,pprof"
+ command: target-env cargo check --target=<< parameters.target >> --no-default-features --features="aws,gcp,azure,jemalloc_replacing_malloc,tokio_console"
- when:
condition:
equal: [ << parameters.target >>, x86_64-pc-windows-gnu ]
@@ -451,7 +430,7 @@ jobs:
command: |
.circleci/scripts/docker_build_release.bash \
"influxdb3" \
- "aws,gcp,azure,jemalloc_replacing_malloc,tokio_console,pprof" \
+ "aws,gcp,azure,jemalloc_replacing_malloc,tokio_console" \
"influxdb3-edge:latest"
# linking might take a while and doesn't produce CLI output
@@ -497,8 +476,6 @@ workflows:
<<: *any_filter
- test:
<<: *any_filter
- - test-heappy:
- <<: *any_filter
- build-dev:
<<: *any_filter
name: build-dev-<< matrix.target >>
@@ -564,7 +541,6 @@ workflows:
- build-release
- sign-packages
- test
- - test-heappy
- doc
- lint
- fmt
diff --git a/.gitignore b/.gitignore
index 3783f3de66..641e0c1e52 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,10 @@
+# default results directory for the load generator:
+results/
+# datafusion-cli history:
+**.history
+# rust compiled binary target directory:
**/target
+# other:
**/*.rs.bk
.idea/
.env
@@ -13,4 +19,3 @@ perf.svg
perf.txt
valgrind-out.txt
*.pending-snap
-results/
diff --git a/Cargo.lock b/Cargo.lock
index c8cec39220..c08c6a99e0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -127,9 +127,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.82"
+version = "1.0.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519"
+checksum = "25bdb32cbbdce2b519a9cd7df3a678443100e265d5e25ca763b7572a5104f5f3"
[[package]]
name = "arc-swap"
@@ -625,9 +625,9 @@ dependencies = [
[[package]]
name = "autocfg"
-version = "1.2.0"
+version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80"
+checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
[[package]]
name = "axum"
@@ -888,9 +888,9 @@ dependencies = [
[[package]]
name = "cc"
-version = "1.0.96"
+version = "1.0.97"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "065a29261d53ba54260972629f9ca6bffa69bac13cd1fed61420f7fa68b9f8bd"
+checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4"
dependencies = [
"jobserver",
"libc",
@@ -1149,15 +1149,6 @@ version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f"
-[[package]]
-name = "cpp_demangle"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7e8227005286ec39567949b33df9896bcadfa6051bccca2488129f108ca23119"
-dependencies = [
- "cfg-if",
-]
-
[[package]]
name = "cpufeatures"
version = "0.2.12"
@@ -1669,15 +1660,6 @@ dependencies = [
"workspace-hack",
]
-[[package]]
-name = "debugid"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d"
-dependencies = [
- "uuid",
-]
-
[[package]]
name = "delegate"
version = "0.8.0"
@@ -1934,18 +1916,6 @@ dependencies = [
"windows-sys 0.52.0",
]
-[[package]]
-name = "findshlibs"
-version = "0.10.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64"
-dependencies = [
- "cc",
- "lazy_static",
- "libc",
- "winapi",
-]
-
[[package]]
name = "finl_unicode"
version = "1.2.0"
@@ -2167,9 +2137,9 @@ dependencies = [
[[package]]
name = "getrandom"
-version = "0.2.14"
+version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c"
+checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
dependencies = [
"cfg-if",
"libc",
@@ -2257,21 +2227,6 @@ dependencies = [
"num-traits",
]
-[[package]]
-name = "heappy"
-version = "0.1.0"
-source = "git+https://github.com/mkmik/heappy?rev=01a1f88e1b404c5894f89eb1a57f813f713d7ad1#01a1f88e1b404c5894f89eb1a57f813f713d7ad1"
-dependencies = [
- "backtrace",
- "bytes",
- "lazy_static",
- "libc",
- "pprof",
- "spin 0.9.8",
- "thiserror",
- "tikv-jemalloc-sys",
-]
-
[[package]]
name = "heck"
version = "0.4.1"
@@ -2483,24 +2438,6 @@ dependencies = [
"hashbrown 0.14.5",
]
-[[package]]
-name = "inferno"
-version = "0.11.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "321f0f839cd44a4686e9504b0a62b4d69a50b62072144c71c68f5873c167b8d9"
-dependencies = [
- "ahash",
- "indexmap 2.2.6",
- "is-terminal",
- "itoa",
- "log",
- "num-format",
- "once_cell",
- "quick-xml 0.26.0",
- "rgb",
- "str_stack",
-]
-
[[package]]
name = "influxdb-line-protocol"
version = "1.0.0"
@@ -2620,7 +2557,7 @@ dependencies = [
"metric",
"once_cell",
"tikv-jemalloc-ctl",
- "tikv-jemalloc-sys",
+ "tikv-jemallocator",
"tokio",
"tokio_metrics_bridge",
"uuid",
@@ -3015,7 +2952,6 @@ dependencies = [
"futures",
"generated_types",
"hashbrown 0.14.5",
- "heappy",
"http",
"hyper",
"log",
@@ -3023,7 +2959,6 @@ dependencies = [
"metric_exporters",
"observability_deps",
"parking_lot",
- "pprof",
"reqwest",
"serde",
"serde_json",
@@ -3051,17 +2986,6 @@ version = "2.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3"
-[[package]]
-name = "is-terminal"
-version = "0.4.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b"
-dependencies = [
- "hermit-abi",
- "libc",
- "windows-sys 0.52.0",
-]
-
[[package]]
name = "is_terminal_polyfill"
version = "1.70.0"
@@ -3121,14 +3045,13 @@ dependencies = [
[[package]]
name = "json-patch"
-version = "1.2.0"
+version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "55ff1e1486799e3f64129f8ccad108b38290df9cd7015cd31bed17239f0789d6"
+checksum = "ec9ad60d674508f3ca8f380a928cfe7b096bc729c4e2dbfe3852bc45da3ab30b"
dependencies = [
"serde",
"serde_json",
"thiserror",
- "treediff",
]
[[package]]
@@ -3469,15 +3392,6 @@ version = "2.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d"
-[[package]]
-name = "memmap2"
-version = "0.9.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe751422e4a8caa417e13c3ea66452215d7d63e19e604f4980461212f3ae1322"
-dependencies = [
- "libc",
-]
-
[[package]]
name = "metric"
version = "0.1.0"
@@ -3640,17 +3554,6 @@ dependencies = [
"workspace-hack",
]
-[[package]]
-name = "nix"
-version = "0.26.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b"
-dependencies = [
- "bitflags 1.3.2",
- "cfg-if",
- "libc",
-]
-
[[package]]
name = "nix"
version = "0.28.0"
@@ -3771,16 +3674,6 @@ dependencies = [
"num-traits",
]
-[[package]]
-name = "num-format"
-version = "0.4.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3"
-dependencies = [
- "arrayvec",
- "itoa",
-]
-
[[package]]
name = "num-integer"
version = "0.1.46"
@@ -3792,9 +3685,9 @@ dependencies = [
[[package]]
name = "num-iter"
-version = "0.1.44"
+version = "0.1.45"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9"
+checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf"
dependencies = [
"autocfg",
"num-integer",
@@ -3815,9 +3708,9 @@ dependencies = [
[[package]]
name = "num-traits"
-version = "0.2.18"
+version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
"autocfg",
"libm",
@@ -3859,7 +3752,7 @@ dependencies = [
"md-5",
"parking_lot",
"percent-encoding",
- "quick-xml 0.31.0",
+ "quick-xml",
"rand",
"reqwest",
"ring",
@@ -4300,32 +4193,6 @@ version = "3.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7"
-[[package]]
-name = "pprof"
-version = "0.13.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef5c97c51bd34c7e742402e216abdeb44d415fbe6ae41d56b114723e953711cb"
-dependencies = [
- "backtrace",
- "cfg-if",
- "findshlibs",
- "inferno",
- "libc",
- "log",
- "nix 0.26.4",
- "once_cell",
- "parking_lot",
- "prost 0.12.4",
- "prost-build",
- "prost-derive 0.12.4",
- "protobuf",
- "sha2",
- "smallvec",
- "symbolic-demangle",
- "tempfile",
- "thiserror",
-]
-
[[package]]
name = "ppv-lite86"
version = "0.2.17"
@@ -4409,9 +4276,9 @@ dependencies = [
[[package]]
name = "prometheus"
-version = "0.13.3"
+version = "0.13.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c"
+checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1"
dependencies = [
"cfg-if",
"fnv",
@@ -4522,12 +4389,6 @@ dependencies = [
"prost 0.12.4",
]
-[[package]]
-name = "protobuf"
-version = "2.28.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94"
-
[[package]]
name = "quanta"
version = "0.12.3"
@@ -4559,15 +4420,6 @@ dependencies = [
"workspace-hack",
]
-[[package]]
-name = "quick-xml"
-version = "0.26.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f50b1c63b38611e7d4d7f68b82d3ad0cc71a2ad2e7f61fc10f1328d917c93cd"
-dependencies = [
- "memchr",
-]
-
[[package]]
name = "quick-xml"
version = "0.31.0"
@@ -4761,15 +4613,6 @@ dependencies = [
"winreg",
]
-[[package]]
-name = "rgb"
-version = "0.8.37"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05aaa8004b64fd573fc9d002f4e632d51ad4f026c2b5ba95fcb6c2f32c2c47d8"
-dependencies = [
- "bytemuck",
-]
-
[[package]]
name = "ring"
version = "0.17.8"
@@ -4938,9 +4781,9 @@ dependencies = [
[[package]]
name = "schemars"
-version = "0.8.17"
+version = "0.8.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f55c82c700538496bdc329bb4918a81f87cc8888811bd123cf325a0f2f8d309"
+checksum = "fc6e7ed6919cb46507fb01ff1654309219f62b4d603822501b0b80d42f6f21ef"
dependencies = [
"dyn-clone",
"schemars_derive",
@@ -4950,9 +4793,9 @@ dependencies = [
[[package]]
name = "schemars_derive"
-version = "0.8.17"
+version = "0.8.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "83263746fe5e32097f06356968a077f96089739c927a61450efa069905eec108"
+checksum = "185f2b7aa7e02d418e453790dde16890256bbd2bcd04b7dc5348811052b53f49"
dependencies = [
"proc-macro2",
"quote",
@@ -4988,11 +4831,11 @@ dependencies = [
[[package]]
name = "security-framework"
-version = "2.10.0"
+version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6"
+checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0"
dependencies = [
- "bitflags 1.3.2",
+ "bitflags 2.5.0",
"core-foundation",
"core-foundation-sys",
"libc",
@@ -5001,9 +4844,9 @@ dependencies = [
[[package]]
name = "security-framework-sys"
-version = "2.10.0"
+version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef"
+checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7"
dependencies = [
"core-foundation-sys",
"libc",
@@ -5580,24 +5423,12 @@ dependencies = [
"uuid",
]
-[[package]]
-name = "stable_deref_trait"
-version = "1.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
-
[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
-[[package]]
-name = "str_stack"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9091b6114800a5f2141aee1d1b9d6ca3592ac062dc5decb3764ec5895a47b4eb"
-
[[package]]
name = "stringprep"
version = "0.1.4"
@@ -5649,29 +5480,6 @@ version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
-[[package]]
-name = "symbolic-common"
-version = "12.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1cccfffbc6bb3bb2d3a26cd2077f4d055f6808d266f9d4d158797a4c60510dfe"
-dependencies = [
- "debugid",
- "memmap2",
- "stable_deref_trait",
- "uuid",
-]
-
-[[package]]
-name = "symbolic-demangle"
-version = "12.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76a99812da4020a67e76c4eb41f08c87364c14170495ff780f30dd519c221a68"
-dependencies = [
- "cpp_demangle",
- "rustc-demangle",
- "symbolic-common",
-]
-
[[package]]
name = "syn"
version = "1.0.109"
@@ -5702,9 +5510,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"
[[package]]
name = "sysinfo"
-version = "0.30.11"
+version = "0.30.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87341a165d73787554941cd5ef55ad728011566fe714e987d1b976c15dbc3a83"
+checksum = "732ffa00f53e6b2af46208fba5718d9662a421049204e156328b66791ffa15ae"
dependencies = [
"cfg-if",
"core-foundation-sys",
@@ -5800,7 +5608,7 @@ dependencies = [
"iox_query_params",
"mutable_batch_lp",
"mutable_batch_pb",
- "nix 0.28.0",
+ "nix",
"observability_deps",
"once_cell",
"parking_lot",
@@ -5892,6 +5700,16 @@ dependencies = [
"libc",
]
+[[package]]
+name = "tikv-jemallocator"
+version = "0.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "965fe0c26be5c56c94e38ba547249074803efd52adfb66de62107d95aab3eaca"
+dependencies = [
+ "libc",
+ "tikv-jemalloc-sys",
+]
+
[[package]]
name = "tiny-keccak"
version = "2.0.2"
@@ -5980,9 +5798,9 @@ dependencies = [
[[package]]
name = "tokio-util"
-version = "0.7.10"
+version = "0.7.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15"
+checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1"
dependencies = [
"bytes",
"futures-core",
@@ -5990,7 +5808,6 @@ dependencies = [
"pin-project-lite",
"slab",
"tokio",
- "tracing",
]
[[package]]
@@ -6326,15 +6143,6 @@ dependencies = [
"workspace-hack",
]
-[[package]]
-name = "treediff"
-version = "4.0.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4d127780145176e2b5d16611cc25a900150e86e9fd79d3bde6ff3a37359c9cb5"
-dependencies = [
- "serde_json",
-]
-
[[package]]
name = "triomphe"
version = "0.1.11"
@@ -6957,18 +6765,18 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"
[[package]]
name = "zerocopy"
-version = "0.7.32"
+version = "0.7.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be"
+checksum = "087eca3c1eaf8c47b94d02790dd086cd594b912d2043d4de4bfdd466b3befb7c"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
-version = "0.7.32"
+version = "0.7.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
+checksum = "6f4b6c273f496d8fd4eaf18853e6b448760225dc030ff2c485a786859aea6393"
dependencies = [
"proc-macro2",
"quote",
diff --git a/influxdb3/Cargo.toml b/influxdb3/Cargo.toml
index c0c68d1ba0..7c5edc0d55 100644
--- a/influxdb3/Cargo.toml
+++ b/influxdb3/Cargo.toml
@@ -57,24 +57,15 @@ default = ["jemalloc_replacing_malloc", "azure", "gcp", "aws"]
azure = ["clap_blocks/azure"] # Optional Azure Object store support
gcp = ["clap_blocks/gcp"] # Optional GCP object store support
aws = ["clap_blocks/aws"] # Optional AWS / S3 object store support
-pprof = ["ioxd_common/pprof"] # Optional http://localhost:8080/debug/pprof/profile support
-heappy = ["ioxd_common/heappy", "influxdb3_process/heappy"] # Optional http://localhost:8080/debug/pproc/alloc support
# Enable tokio_console support (https://github.com/tokio-rs/console)
#
# Requires enabling trace level tracing events for [tokio,runtime].
tokio_console = ["console-subscriber", "tokio/tracing", "observability_deps/release_max_level_trace"]
-# heappy is an optional feature; Not on by default as it
-# runtime overhead on all allocations (calls to malloc).
-# Cargo cannot currently implement mutually exclusive features so let's force every build
-# to pick either heappy or jemalloc_replacing_malloc feature at least until we figure out something better.
+# Use jemalloc as the default allocator.
jemalloc_replacing_malloc = ["influxdb3_process/jemalloc_replacing_malloc"]
-# Implicit feature selected when running under `clippy --all-features` to accept mutable exclusive features during
-# linting
-clippy = []
-
[dev-dependencies]
# Core Crates
arrow_util.workspace = true
diff --git a/influxdb3/src/main.rs b/influxdb3/src/main.rs
index f68c68bb4f..8ca3b9306a 100644
--- a/influxdb3/src/main.rs
+++ b/influxdb3/src/main.rs
@@ -36,13 +36,6 @@ enum ReturnCode {
Failure = 1,
}
-#[cfg(all(
- feature = "heappy",
- feature = "jemalloc_replacing_malloc",
- not(feature = "clippy")
-))]
-compile_error!("heappy and jemalloc_replacing_malloc features are mutually exclusive");
-
#[derive(Debug, clap::Parser)]
#[clap(
name = "influxdb3",
diff --git a/influxdb3_process/Cargo.toml b/influxdb3_process/Cargo.toml
index 8524448888..f7bb6c60a8 100644
--- a/influxdb3_process/Cargo.toml
+++ b/influxdb3_process/Cargo.toml
@@ -21,21 +21,13 @@ uuid.workspace = true
# Optional Dependencies
[target.'cfg(not(target_env = "msvc"))'.dependencies]
tikv-jemalloc-ctl = { version = "0.5.4", optional = true }
-tikv-jemalloc-sys = { version = "0.5.4", optional = true, features = ["unprefixed_malloc_on_supported_platforms"] }
+tikv-jemallocator = { version = "0.5", optional = true, features = ["unprefixed_malloc_on_supported_platforms"] }
[features]
default = ["jemalloc_replacing_malloc"]
-heappy = []
-# heappy is an optional feature; Not on by default as it
-# runtime overhead on all allocations (calls to malloc).
-# Cargo cannot currently implement mutually exclusive features so let's force every build
-# to pick either heappy or jemalloc_replacing_malloc feature at least until we figure out something better.
-jemalloc_replacing_malloc = ["tikv-jemalloc-sys", "tikv-jemalloc-ctl"]
-
-# Implicit feature selected when running under `clippy --all-features` to accept mutable exclusive features during
-# linting
-clippy = []
+# Use jemalloc as the allocator.
+jemalloc_replacing_malloc = ["tikv-jemallocator", "tikv-jemalloc-ctl"]
[lints]
workspace = true
diff --git a/influxdb3_process/src/lib.rs b/influxdb3_process/src/lib.rs
index 854c8ef2ba..3ec1c6ce41 100644
--- a/influxdb3_process/src/lib.rs
+++ b/influxdb3_process/src/lib.rs
@@ -7,34 +7,22 @@ use once_cell::sync::Lazy;
/// The process name on the local OS running `influxdb3`
pub const INFLUXDB3_PROCESS_NAME: &str = "influxdb3";
-#[cfg(all(
- not(feature = "heappy"),
- feature = "jemalloc_replacing_malloc",
- not(target_env = "msvc")
-))]
+#[cfg(feature = "jemalloc_replacing_malloc")]
+#[global_allocator]
+static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
+
+#[cfg(all(feature = "jemalloc_replacing_malloc", not(target_env = "msvc")))]
pub mod jemalloc;
#[cfg(tokio_unstable)]
use tokio_metrics_bridge::setup_tokio_metrics;
-#[cfg(any(
- all(not(feature = "heappy"), not(feature = "jemalloc_replacing_malloc")),
- target_env = "msvc"
-))]
+#[cfg(any(not(feature = "jemalloc_replacing_malloc"), target_env = "msvc"))]
pub fn build_malloc_conf() -> String {
"system".to_string()
}
-#[cfg(all(feature = "heappy", not(feature = "jemalloc_replacing_malloc")))]
-pub fn build_malloc_conf() -> String {
- "heappy".to_string()
-}
-
-#[cfg(all(
- not(feature = "heappy"),
- feature = "jemalloc_replacing_malloc",
- not(target_env = "msvc")
-))]
+#[cfg(all(feature = "jemalloc_replacing_malloc", not(target_env = "msvc")))]
pub fn build_malloc_conf() -> String {
tikv_jemalloc_ctl::config::malloc_conf::mib()
.unwrap()
@@ -43,20 +31,6 @@ pub fn build_malloc_conf() -> String {
.to_string()
}
-#[cfg(all(
- feature = "heappy",
- feature = "jemalloc_replacing_malloc",
- not(feature = "clippy")
-))]
-pub fn build_malloc_conf() -> String {
- compile_error!("must use exactly one memory allocator")
-}
-
-#[cfg(feature = "clippy")]
-pub fn build_malloc_conf() -> String {
- "clippy".to_string()
-}
-
/// Package version.
pub static INFLUXDB3_VERSION: Lazy<&'static str> =
Lazy::new(|| option_env!("CARGO_PKG_VERSION").unwrap_or("UNKNOWN"));
@@ -111,11 +85,7 @@ pub fn setup_metric_registry() -> Arc<metric::Registry> {
.set(PROCESS_START_TIME.timestamp() as u64);
// Register jemalloc metrics
- #[cfg(all(
- not(feature = "heappy"),
- feature = "jemalloc_replacing_malloc",
- not(target_env = "msvc")
- ))]
+ #[cfg(all(feature = "jemalloc_replacing_malloc", not(target_env = "msvc")))]
registry.register_instrument("jemalloc_metrics", crate::jemalloc::JemallocMetrics::new);
// Register tokio metric for main runtime
diff --git a/influxdb3_server/src/http.rs b/influxdb3_server/src/http.rs
index 107dcc0747..a4a2eb3a8d 100644
--- a/influxdb3_server/src/http.rs
+++ b/influxdb3_server/src/http.rs
@@ -39,7 +39,6 @@ use serde::Deserialize;
use serde::Serialize;
use std::convert::Infallible;
use std::fmt::Debug;
-use std::num::NonZeroI32;
use std::pin::Pin;
use std::str::Utf8Error;
use std::string::FromUtf8Error;
@@ -104,18 +103,6 @@ pub enum Error {
#[error("unsupported method")]
UnsupportedMethod,
- /// PProf support is not compiled
- #[error("pprof support is not compiled")]
- PProfIsNotCompiled,
-
- /// Heappy support is not compiled
- #[error("heappy support is not compiled")]
- HeappyIsNotCompiled,
-
- #[cfg(feature = "heappy")]
- #[error("heappy error: {0}")]
- Heappy(heappy::Error),
-
/// Hyper serving error
#[error("error serving http: {0}")]
ServingHttp(#[from] hyper::Error),
@@ -956,9 +943,6 @@ where
(Method::GET, "/health" | "/api/v1/health") => http_server.health(),
(Method::GET | Method::POST, "/ping") => http_server.ping(),
(Method::GET, "/metrics") => http_server.handle_metrics(),
- (Method::GET, "/debug/pprof") => pprof_home(req).await,
- (Method::GET, "/debug/pprof/profile") => pprof_profile(req).await,
- (Method::GET, "/debug/pprof/allocs") => pprof_heappy_profile(req).await,
_ => {
let body = Body::from("not found");
Ok(Response::builder()
@@ -996,162 +980,6 @@ fn legacy_write_error_to_response(e: WriteParseError) -> Response<Body> {
Response::builder().status(status).body(body).unwrap()
}
-async fn pprof_home(req: Request<Body>) -> Result<Response<Body>> {
- let default_host = HeaderValue::from_static("localhost");
- let host = req
- .headers()
- .get("host")
- .unwrap_or(&default_host)
- .to_str()
- .unwrap_or_default();
- let profile_cmd = format!(
- "/debug/pprof/profile?seconds={}",
- PProfArgs::default_seconds()
- );
- let allocs_cmd = format!(
- "/debug/pprof/allocs?seconds={}",
- PProfAllocsArgs::default_seconds()
- );
- Ok(Response::new(Body::from(format!(
- r#"<a href="{profile_cmd}">http://{host}{profile_cmd}</a><br><a href="{allocs_cmd}">http://{host}{allocs_cmd}</a>"#,
- ))))
-}
-
-#[derive(Debug, Deserialize)]
-struct PProfArgs {
- #[serde(default = "PProfArgs::default_seconds")]
- #[allow(dead_code)]
- seconds: u64,
- #[serde(default = "PProfArgs::default_frequency")]
- #[allow(dead_code)]
- frequency: NonZeroI32,
-}
-
-impl PProfArgs {
- fn default_seconds() -> u64 {
- 30
- }
-
- // 99Hz to avoid coinciding with special periods
- fn default_frequency() -> NonZeroI32 {
- NonZeroI32::new(99).unwrap()
- }
-}
-
-#[derive(Debug, Deserialize)]
-struct PProfAllocsArgs {
- #[serde(default = "PProfAllocsArgs::default_seconds")]
- #[allow(dead_code)]
- seconds: u64,
- // The sampling interval is a number of bytes that have to cumulatively allocated for a sample to be taken.
- //
- // For example if the sampling interval is 99, and you're doing a million of 40 bytes allocations,
- // the allocations profile will account for 16MB instead of 40MB.
- // Heappy will adjust the estimate for sampled recordings, but now that feature is not yet implemented.
- #[serde(default = "PProfAllocsArgs::default_interval")]
- #[allow(dead_code)]
- interval: NonZeroI32,
-}
-
-impl PProfAllocsArgs {
- fn default_seconds() -> u64 {
- 30
- }
-
- // 1 means: sample every allocation.
- fn default_interval() -> NonZeroI32 {
- NonZeroI32::new(1).unwrap()
- }
-}
-
-#[cfg(feature = "pprof")]
-async fn pprof_profile(req: Request<Body>) -> Result<Response<Body>, ApplicationError> {
- use ::pprof::protos::Message;
- use snafu::ResultExt;
-
- let query_string = req.uri().query().unwrap_or_default();
- let query: PProfArgs = serde_urlencoded::from_str(query_string)
- .context(InvalidQueryStringSnafu { query_string })?;
-
- let report = self::pprof::dump_rsprof(query.seconds, query.frequency.get())
- .await
- .map_err(|e| Box::new(e) as _)
- .context(PProfSnafu)?;
-
- let mut body: Vec<u8> = Vec::new();
-
- // render flamegraph when opening in the browser
- // otherwise render as protobuf; works great with: go tool pprof http://..../debug/pprof/profile
- if req
- .headers()
- .get_all("Accept")
- .iter()
- .flat_map(|i| i.to_str().unwrap_or_default().split(','))
- .any(|i| i == "text/html" || i == "image/svg+xml")
- {
- report
- .flamegraph(&mut body)
- .map_err(|e| Box::new(e) as _)
- .context(PProfSnafu)?;
- if body.is_empty() {
- return EmptyFlamegraphSnafu.fail();
- }
- } else {
- let profile = report
- .pprof()
- .map_err(|e| Box::new(e) as _)
- .context(PProfSnafu)?;
- profile
- .encode(&mut body)
- .map_err(|e| Box::new(e) as _)
- .context(ProstSnafu)?;
- }
-
- Ok(Response::new(Body::from(body)))
-}
-
-#[cfg(not(feature = "pprof"))]
-async fn pprof_profile(_req: Request<Body>) -> Result<Response<Body>> {
- Err(Error::PProfIsNotCompiled)
-}
-
-// If heappy support is enabled, call it
-#[cfg(feature = "heappy")]
-async fn pprof_heappy_profile(req: Request<Body>) -> Result<Response<Body>> {
- let query_string = req.uri().query().unwrap_or_default();
- let query: PProfAllocsArgs = serde_urlencoded::from_str(query_string)?;
-
- let report = self::heappy::dump_heappy_rsprof(query.seconds, query.interval.get()).await?;
-
- let mut body: Vec<u8> = Vec::new();
-
- // render flamegraph when opening in the browser
- // otherwise render as protobuf;
- // works great with: go tool pprof http://..../debug/pprof/allocs
- if req
- .headers()
- .get_all("Accept")
- .iter()
- .flat_map(|i| i.to_str().unwrap_or_default().split(','))
- .any(|i| i == "text/html" || i == "image/svg+xml")
- {
- report.flamegraph(&mut body);
- if body.is_empty() {
- return EmptyFlamegraphSnafu.fail();
- }
- } else {
- report.write_pprof(&mut body)?
- }
-
- Ok(Response::new(Body::from(body)))
-}
-
-// Return error if heappy not enabled
-#[cfg(not(feature = "heappy"))]
-async fn pprof_heappy_profile(_req: Request<Body>) -> Result<Response<Body>> {
- Err(Error::HeappyIsNotCompiled)
-}
-
#[cfg(test)]
mod tests {
use super::validate_db_name;
|
7e921e6a234feaa999b71a5f283a0a5a6bd3925a
|
Carol (Nichols || Goulding)
|
2023-01-11 11:38:01
|
Make recording num parquet files an explicit test step
|
To support a case where someone calls WriteLineProtocol twice in
a row to simulate two write requests. The test should be able to
record this state once, before the two write requests, rather than twice.
| null |
fix: Make recording num parquet files an explicit test step
To support a case where someone calls WriteLineProtocol twice in
a row to simulate two write requests. The test should be able to
record this state once, before the two write requests, rather than twice.
|
diff --git a/influxdb_iox/tests/end_to_end_cases/querier.rs b/influxdb_iox/tests/end_to_end_cases/querier.rs
index 4bdab22727..9dcb496f6c 100644
--- a/influxdb_iox/tests/end_to_end_cases/querier.rs
+++ b/influxdb_iox/tests/end_to_end_cases/querier.rs
@@ -901,6 +901,7 @@ mod kafkaless_rpc_write {
StepTest::new(
&mut cluster,
vec![
+ Step::RecordNumParquetFiles,
Step::WriteLineProtocol(format!("{},tag1=A,tag2=B val=42i 123456", table_name)),
// Wait for data to be persisted to parquet
Step::WaitForPersisted2,
@@ -945,6 +946,7 @@ mod kafkaless_rpc_write {
StepTest::new(
&mut cluster,
vec![
+ Step::RecordNumParquetFiles,
Step::WriteLineProtocol(format!("{},tag1=A,tag2=B val=42i 123456", table_name)),
Step::WaitForPersisted2,
Step::Query {
@@ -978,6 +980,7 @@ mod kafkaless_rpc_write {
let mut cluster = MiniCluster::create_shared2(database_url).await;
let steps = vec![
+ Step::RecordNumParquetFiles,
Step::WriteLineProtocol(format!("{},tag1=A,tag2=B val=42i 123456", table_name)),
// Wait for data to be persisted to parquet
Step::WaitForPersisted2,
@@ -1002,6 +1005,7 @@ mod kafkaless_rpc_write {
"+------+------+--------------------------------+-----+",
],
},
+ Step::RecordNumParquetFiles,
// write another parquet file that has non duplicated data
Step::WriteLineProtocol(format!("{},tag1=B,tag2=A val=43i 789101112", table_name)),
// Wait for data to be persisted to parquet
diff --git a/test_helpers_end_to_end/src/steps.rs b/test_helpers_end_to_end/src/steps.rs
index 756ce693e8..2dd0ba6b52 100644
--- a/test_helpers_end_to_end/src/steps.rs
+++ b/test_helpers_end_to_end/src/steps.rs
@@ -143,6 +143,12 @@ pub enum Step {
/// Wait for all previously written data to be persisted
WaitForPersisted,
+ /// Ask the catalog service how many Parquet files it has for this cluster's namespace. Do this
+ /// before a write where you're interested in when the write has been persisted to Parquet;
+ /// then after the write use `WaitForPersisted2` to observe the change in the number of Parquet
+ /// files from the value this step recorded.
+ RecordNumParquetFiles,
+
/// Wait for all previously written data to be persisted by observing an increase in the number
/// of Parquet files in the catalog for this cluster's namespace. Needed for
/// router2/ingester2/querier2.
@@ -235,9 +241,6 @@ impl<'a> StepTest<'a> {
"====Begin writing line protocol to v2 HTTP API:\n{}",
line_protocol
);
- // Get the current number of Parquet files in the cluster's namespace before
- // starting a new write so we can observe a change when waiting for persistence.
- state.record_num_parquet_files().await;
let response = state.cluster.write_to_router(line_protocol).await;
assert_eq!(response.status(), StatusCode::NO_CONTENT);
let write_token = get_write_token(&response);
@@ -262,6 +265,11 @@ impl<'a> StepTest<'a> {
}
info!("====Done waiting for all write tokens to be persisted");
}
+ // Get the current number of Parquet files in the cluster's namespace before
+ // starting a new write so we can observe a change when waiting for persistence.
+ Step::RecordNumParquetFiles => {
+ state.record_num_parquet_files().await;
+ }
Step::WaitForPersisted2 => {
info!("====Begin waiting for a change in the number of Parquet files");
state.wait_for_num_parquet_file_change().await;
|
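A minimal sketch (not part of the commit above) of the step ordering this change enables, written against the `Step`/`StepTest` API shown in the diff. It assumes an async test with a `MiniCluster` already created (as in the diff), and the measurement name, line protocol, and the trailing `.run().await` call are illustrative assumptions rather than verbatim project code.

// Hypothetical test body: snapshot the Parquet file count once, issue two
// consecutive write requests, then wait for the count to change.
StepTest::new(
    &mut cluster,
    vec![
        // Record the Parquet file count a single time, before both writes...
        Step::RecordNumParquetFiles,
        Step::WriteLineProtocol("m,tag1=A val=42i 123456".to_string()),
        Step::WriteLineProtocol("m,tag1=B val=43i 123457".to_string()),
        // ...then observe the increase relative to that recorded value.
        Step::WaitForPersisted2,
    ],
)
.run()
.await;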
f1058dccf6dc61a825c9b7ab0a990a82a889a6c2
|
Dom Dwyer
|
2023-06-16 12:12:24
|
proptest model timestamp distribution
|
Changes the partitioner proptest to use a timestamp generation strategy
that more accurately models the distribution of timestamps in real-world
requests.
| null |
test: proptest model timestamp distribution
Changes the partitioner proptest to use a timestamp generation strategy
that more accurately models the distribution of timestamps in real-world
requests.
|
diff --git a/mutable_batch/src/payload/partition.rs b/mutable_batch/src/payload/partition.rs
index fb91b7ff9a..114da03672 100644
--- a/mutable_batch/src/payload/partition.rs
+++ b/mutable_batch/src/payload/partition.rs
@@ -1082,6 +1082,28 @@ mod tests {
}
}
+ prop_compose! {
+ /// Yield a Vec containing an identical timestamp run of random length,
+ /// up to `max_run_len`,
+ fn arbitrary_timestamp_run(max_run_len: usize)(v in 0_i64..i64::MAX, run_len in 1..max_run_len) -> Vec<i64> {
+ let mut x = Vec::with_capacity(run_len);
+ x.resize(max_run_len, v);
+ x
+ }
+ }
+
+ /// Yield a Vec of timestamp values that more accurately model real
+ /// timestamps than pure random selection.
+ ///
+ /// Runs of identical timestamps are generated with
+ /// [`arbitrary_timestamp_run()`], which are then shuffled to produce a list
+ /// of timestamps with limited repeats, sometimes consecutively.
+ fn arbitrary_timestamps() -> impl Strategy<Value = Vec<i64>> {
+ proptest::collection::vec(arbitrary_timestamp_run(6), 10..100)
+ .prop_map(|v| v.into_iter().flatten().collect::<Vec<_>>())
+ .prop_shuffle()
+ }
+
proptest! {
/// A property test that asserts a write comprised of an arbitrary
/// subset of [`TEST_TAG_NAME_SET`] with randomised values, that is
@@ -1157,7 +1179,7 @@ mod tests {
}
/// A property test that asserts the partitioner tolerates (does not
- /// panic) randomised, potentially invalid strfitme formatter strings.
+ /// panic) randomised, potentially invalid strftime formatter strings.
#[test]
fn prop_arbitrary_strftime_format(fmt in any::<String>()) {
let mut batch = MutableBatch::new();
@@ -1205,13 +1227,13 @@ mod tests {
}
}
- // Drives the stftime formatter through the "front door", using the same
- // interface as a user would call to partition data. This validates the
- // integration between the various formatters, range encoders, dedupe,
- // etc.
+ // Drives the strftime formatter through the "front door", using the
+ // same interface as a user would call to partition data. This validates
+ // the integration between the various formatters, range encoders,
+ // dedupe, etc.
#[test]
fn prop_strftime_integration(
- times in proptest::collection::vec(0_i64..i64::MAX, 10..100),
+ times in arbitrary_timestamps(),
format in prop_oneof![
Just("%Y-%m-%d"), // Default scheme
Just("%s") // Unix seconds, to drive increased cache miss rate in strftime formatter
|
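A minimal standalone sketch (not part of the commit above) of the timestamp distribution the new strategy models: short runs of identical values that are then shuffled, so repeats occur, sometimes consecutively. It uses the `rand` crate instead of proptest, and the function name and parameters are illustrative assumptions.

use rand::{seq::SliceRandom, Rng};

// Generate `n_runs` runs of an identical random timestamp, each run between 1
// and `max_run_len` values long, then shuffle the combined list.
fn model_timestamps(max_run_len: usize, n_runs: usize) -> Vec<i64> {
    let mut rng = rand::thread_rng();
    let mut out = Vec::new();
    for _ in 0..n_runs {
        let ts: i64 = rng.gen_range(0..i64::MAX);
        let run_len = rng.gen_range(1..=max_run_len);
        out.extend(std::iter::repeat(ts).take(run_len));
    }
    out.shuffle(&mut rng);
    out
}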
942a6100b5dc39b56c4f8fb57f9b6303ee4addd7
|
Marco Neumann
|
2022-12-02 12:14:16
|
check schemas in `pretty_print_batches` (#6309)
|
* fix: check schemas in `pretty_print_batches`
I think most users of this function (and `assert_batches_eq`) assume
that all batches have the same schema. If not, `pretty_print_batches`
may either fail to produce an actual table (some rows may have more or
fewer columns) or silently produce a table that looks "alright".
* fix: equalize schemas where it is required/desired
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
fix: check schemas in `pretty_print_batches` (#6309)
* fix: check schemas in `pretty_print_batches`
I think most users of this function (and `assert_batches_eq`) assume
that all batches have the same schema. If not, `pretty_print_batches`
may either fail producing an actual table (some rows may have more or
less columns) or silently produce a table that looks "alright".
* fix: equalize schemas where it is required/desired
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/arrow_util/src/display.rs b/arrow_util/src/display.rs
index b3f8c8ea8a..868dc6ad22 100644
--- a/arrow_util/src/display.rs
+++ b/arrow_util/src/display.rs
@@ -86,7 +86,16 @@ fn create_table(results: &[RecordBatch]) -> Result<Table> {
}
table.set_header(header);
- for batch in results {
+ for (i, batch) in results.iter().enumerate() {
+ if batch.schema() != schema {
+ return Err(ArrowError::SchemaError(format!(
+ "Batches have different schemas:\n\nFirst:\n{}\n\nBatch {}:\n{}",
+ schema,
+ i + 1,
+ batch.schema()
+ )));
+ }
+
for row in 0..batch.num_rows() {
let mut cells = Vec::new();
for col in 0..batch.num_columns() {
@@ -112,6 +121,7 @@ mod tests {
},
datatypes::Int32Type,
};
+ use datafusion::common::assert_contains;
#[test]
fn test_formatting() {
@@ -182,4 +192,16 @@ mod tests {
expected, actual
);
}
+
+ #[test]
+ fn test_pretty_format_batches_checks_schemas() {
+ let int64_array: ArrayRef = Arc::new([Some(2)].iter().collect::<Int64Array>());
+ let uint64_array: ArrayRef = Arc::new([Some(2)].iter().collect::<UInt64Array>());
+
+ let batch1 = RecordBatch::try_from_iter(vec![("col", int64_array)]).unwrap();
+ let batch2 = RecordBatch::try_from_iter(vec![("col", uint64_array)]).unwrap();
+
+ let err = pretty_format_batches(&[batch1, batch2]).unwrap_err();
+ assert_contains!(err.to_string(), "Batches have different schemas:");
+ }
}
diff --git a/arrow_util/src/test_util.rs b/arrow_util/src/test_util.rs
index cee66a4717..8f3a433db1 100644
--- a/arrow_util/src/test_util.rs
+++ b/arrow_util/src/test_util.rs
@@ -2,8 +2,10 @@
use std::sync::Arc;
use arrow::{
- array::{ArrayRef, StringArray},
+ array::{new_null_array, ArrayRef, StringArray},
compute::kernels::sort::{lexsort, SortColumn, SortOptions},
+ datatypes::Schema,
+ error::ArrowError,
record_batch::RecordBatch,
};
@@ -131,3 +133,26 @@ where
})
.collect()
}
+
+/// Equalize batch schemas by creating NULL columns.
+pub fn equalize_batch_schemas(batches: Vec<RecordBatch>) -> Result<Vec<RecordBatch>, ArrowError> {
+ let common_schema = Arc::new(Schema::try_merge(
+ batches.iter().map(|batch| batch.schema().as_ref().clone()),
+ )?);
+
+ Ok(batches
+ .into_iter()
+ .map(|batch| {
+ let batch_schema = batch.schema();
+ let columns = common_schema
+ .fields()
+ .iter()
+ .map(|field| match batch_schema.index_of(field.name()) {
+ Ok(idx) => Arc::clone(batch.column(idx)),
+ Err(_) => new_null_array(field.data_type(), batch.num_rows()),
+ })
+ .collect();
+ RecordBatch::try_new(Arc::clone(&common_schema), columns).unwrap()
+ })
+ .collect())
+}
diff --git a/ingester/src/querier_handler.rs b/ingester/src/querier_handler.rs
index dc900d34e3..16a1ac7141 100644
--- a/ingester/src/querier_handler.rs
+++ b/ingester/src/querier_handler.rs
@@ -2,9 +2,12 @@
use std::{pin::Pin, sync::Arc};
-use arrow::{array::new_null_array, error::ArrowError, record_batch::RecordBatch};
-use arrow_util::optimize::{
- prepare_batch_for_flight, prepare_schema_for_flight, split_batch_for_grpc_response,
+use arrow::{error::ArrowError, record_batch::RecordBatch};
+use arrow_util::{
+ optimize::{
+ prepare_batch_for_flight, prepare_schema_for_flight, split_batch_for_grpc_response,
+ },
+ test_util::equalize_batch_schemas,
};
use data_types::{NamespaceId, PartitionId, SequenceNumber, TableId};
use datafusion::physical_plan::SendableRecordBatchStream;
@@ -12,7 +15,7 @@ use datafusion_util::MemoryStream;
use futures::{Stream, StreamExt, TryStreamExt};
use generated_types::ingester::IngesterQueryRequest;
use observability_deps::tracing::*;
-use schema::{merge::SchemaMerger, Projection};
+use schema::Projection;
use snafu::{ensure, Snafu};
use trace::span::{Span, SpanRecorder};
@@ -187,7 +190,6 @@ impl IngesterQueryResponse {
/// do not line up with the snapshot-scoped record batches.
pub async fn into_record_batches(self) -> Vec<RecordBatch> {
let mut snapshot_schema = None;
- let mut schema_merger = SchemaMerger::new();
let mut batches = vec![];
let mut stream = self.flatten();
@@ -201,33 +203,13 @@ impl IngesterQueryResponse {
}
FlatIngesterQueryResponse::StartSnapshot { schema } => {
snapshot_schema = Some(Arc::clone(&schema));
-
- schema_merger = schema_merger
- .merge(&schema::Schema::try_from(schema).unwrap())
- .unwrap();
}
}
}
assert!(!batches.is_empty());
- // equalize schemas
- let common_schema = schema_merger.build().as_arrow();
- batches
- .into_iter()
- .map(|batch| {
- let batch_schema = batch.schema();
- let columns = common_schema
- .fields()
- .iter()
- .map(|field| match batch_schema.index_of(field.name()) {
- Ok(idx) => Arc::clone(batch.column(idx)),
- Err(_) => new_null_array(field.data_type(), batch.num_rows()),
- })
- .collect();
- RecordBatch::try_new(Arc::clone(&common_schema), columns).unwrap()
- })
- .collect()
+ equalize_batch_schemas(batches).unwrap()
}
}
diff --git a/iox_query/src/provider.rs b/iox_query/src/provider.rs
index 9fde787a99..700dcc814c 100644
--- a/iox_query/src/provider.rs
+++ b/iox_query/src/provider.rs
@@ -1371,7 +1371,9 @@ mod test {
use super::*;
use crate::test::{raw_data, TestChunk};
use arrow::datatypes::DataType;
- use arrow_util::{assert_batches_eq, assert_batches_sorted_eq};
+ use arrow_util::{
+ assert_batches_eq, assert_batches_sorted_eq, test_util::equalize_batch_schemas,
+ };
use datafusion::physical_plan::displayable;
use datafusion_util::test_collect;
use schema::{builder::SchemaBuilder, TIME_COLUMN_NAME};
@@ -2170,27 +2172,30 @@ mod test {
let chunks = vec![chunk1, chunk2, chunk3];
// data in its original form
let expected = vec![
- "+-----------+------+--------------------------------+--------------------------------+",
- "| field_int | tag1 | tag2 | time |",
- "+-----------+------+--------------------------------+--------------------------------+",
- "| 1000 | MT | CT | 1970-01-01T00:00:00.000001Z |",
- "| 10 | MT | AL | 1970-01-01T00:00:00.000007Z |",
- "| 70 | CT | CT | 1970-01-01T00:00:00.000000100Z |",
- "| 100 | AL | MA | 1970-01-01T00:00:00.000000050Z |",
- "| 5 | MT | AL | 1970-01-01T00:00:00.000005Z |",
- "| 1000 | MT | 1970-01-01T00:00:00.000001Z | |",
- "| 10 | MT | 1970-01-01T00:00:00.000007Z | |",
- "| 70 | CT | 1970-01-01T00:00:00.000000100Z | |",
- "| 100 | AL | 1970-01-01T00:00:00.000000050Z | |",
- "| 5 | MT | 1970-01-01T00:00:00.000005Z | |",
- "| 1000 | MT | 1970-01-01T00:00:00.000001Z | |",
- "| 10 | MT | 1970-01-01T00:00:00.000007Z | |",
- "| 70 | CT | 1970-01-01T00:00:00.000000100Z | |",
- "| 100 | AL | 1970-01-01T00:00:00.000000050Z | |",
- "| 5 | MT | 1970-01-01T00:00:00.000005Z | |",
- "+-----------+------+--------------------------------+--------------------------------+",
+ "+-----------+------+------+--------------------------------+-----------------+",
+ "| field_int | tag1 | tag2 | time | other_field_int |",
+ "+-----------+------+------+--------------------------------+-----------------+",
+ "| 1000 | MT | CT | 1970-01-01T00:00:00.000001Z | |",
+ "| 10 | MT | AL | 1970-01-01T00:00:00.000007Z | |",
+ "| 70 | CT | CT | 1970-01-01T00:00:00.000000100Z | |",
+ "| 100 | AL | MA | 1970-01-01T00:00:00.000000050Z | |",
+ "| 5 | MT | AL | 1970-01-01T00:00:00.000005Z | |",
+ "| | MT | | 1970-01-01T00:00:00.000001Z | 1000 |",
+ "| | MT | | 1970-01-01T00:00:00.000007Z | 10 |",
+ "| | CT | | 1970-01-01T00:00:00.000000100Z | 70 |",
+ "| | AL | | 1970-01-01T00:00:00.000000050Z | 100 |",
+ "| | MT | | 1970-01-01T00:00:00.000005Z | 5 |",
+ "| | MT | | 1970-01-01T00:00:00.000001Z | 1000 |",
+ "| | MT | | 1970-01-01T00:00:00.000007Z | 10 |",
+ "| | CT | | 1970-01-01T00:00:00.000000100Z | 70 |",
+ "| | AL | | 1970-01-01T00:00:00.000000050Z | 100 |",
+ "| | MT | | 1970-01-01T00:00:00.000005Z | 5 |",
+ "+-----------+------+------+--------------------------------+-----------------+",
];
- assert_batches_eq!(&expected, &raw_data(&chunks).await);
+ assert_batches_eq!(
+ &expected,
+ &equalize_batch_schemas(raw_data(&chunks).await).unwrap()
+ );
// request just the fields
let schema = SchemaBuilder::new()
@@ -2283,27 +2288,30 @@ mod test {
let chunks = vec![chunk1, chunk2, chunk3];
// data in its original form
let expected = vec![
- "+-----------+------+------+--------------------------------+",
- "| field_int | tag1 | tag2 | time |",
- "+-----------+------+------+--------------------------------+",
- "| 1000 | MT | CT | 1970-01-01T00:00:00.000001Z |",
- "| 10 | MT | AL | 1970-01-01T00:00:00.000007Z |",
- "| 70 | CT | CT | 1970-01-01T00:00:00.000000100Z |",
- "| 100 | AL | MA | 1970-01-01T00:00:00.000000050Z |",
- "| 5 | MT | AL | 1970-01-01T00:00:00.000005Z |",
- "| 1000 | MT | CT | 1970-01-01T00:00:00.000001Z |",
- "| 10 | MT | MT | 1970-01-01T00:00:00.000007Z |",
- "| 70 | CT | AL | 1970-01-01T00:00:00.000000100Z |",
- "| 100 | AL | AL | 1970-01-01T00:00:00.000000050Z |",
- "| 5 | MT | MT | 1970-01-01T00:00:00.000005Z |",
- "| 1000 | 1000 | CT | 1970-01-01T00:00:00.000001Z |",
- "| 10 | 10 | MT | 1970-01-01T00:00:00.000007Z |",
- "| 70 | 70 | AL | 1970-01-01T00:00:00.000000100Z |",
- "| 100 | 100 | AL | 1970-01-01T00:00:00.000000050Z |",
- "| 5 | 5 | MT | 1970-01-01T00:00:00.000005Z |",
- "+-----------+------+------+--------------------------------+",
+ "+-----------+------+------+--------------------------------+------+------------+",
+ "| field_int | tag1 | tag2 | time | tag3 | field_int2 |",
+ "+-----------+------+------+--------------------------------+------+------------+",
+ "| 1000 | MT | CT | 1970-01-01T00:00:00.000001Z | | |",
+ "| 10 | MT | AL | 1970-01-01T00:00:00.000007Z | | |",
+ "| 70 | CT | CT | 1970-01-01T00:00:00.000000100Z | | |",
+ "| 100 | AL | MA | 1970-01-01T00:00:00.000000050Z | | |",
+ "| 5 | MT | AL | 1970-01-01T00:00:00.000005Z | | |",
+ "| 1000 | MT | | 1970-01-01T00:00:00.000001Z | CT | |",
+ "| 10 | MT | | 1970-01-01T00:00:00.000007Z | MT | |",
+ "| 70 | CT | | 1970-01-01T00:00:00.000000100Z | AL | |",
+ "| 100 | AL | | 1970-01-01T00:00:00.000000050Z | AL | |",
+ "| 5 | MT | | 1970-01-01T00:00:00.000005Z | MT | |",
+ "| 1000 | | | 1970-01-01T00:00:00.000001Z | CT | 1000 |",
+ "| 10 | | | 1970-01-01T00:00:00.000007Z | MT | 10 |",
+ "| 70 | | | 1970-01-01T00:00:00.000000100Z | AL | 70 |",
+ "| 100 | | | 1970-01-01T00:00:00.000000050Z | AL | 100 |",
+ "| 5 | | | 1970-01-01T00:00:00.000005Z | MT | 5 |",
+ "+-----------+------+------+--------------------------------+------+------------+",
];
- assert_batches_eq!(&expected, &raw_data(&chunks).await);
+ assert_batches_eq!(
+ &expected,
+ &equalize_batch_schemas(raw_data(&chunks).await).unwrap()
+ );
let output_sort_key = SortKey::from_columns(vec!["tag2", "tag1", "time"]);
let sort_plan = Deduplicater::build_deduplicate_plan_for_overlapped_chunks(
|
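A minimal sketch (not part of the commit above) of how the new `equalize_batch_schemas` helper is meant to be used together with the now-stricter `pretty_format_batches`; the column names, values, and exact import paths are illustrative assumptions.

use std::sync::Arc;

use arrow::array::{ArrayRef, Int64Array, UInt64Array};
use arrow::record_batch::RecordBatch;
use arrow_util::{display::pretty_format_batches, test_util::equalize_batch_schemas};

fn print_mixed_schema_batches() {
    let a: ArrayRef = Arc::new(Int64Array::from(vec![1i64, 2]));
    let b: ArrayRef = Arc::new(UInt64Array::from(vec![3u64]));
    let batch1 = RecordBatch::try_from_iter(vec![("col_a", a)]).unwrap();
    let batch2 = RecordBatch::try_from_iter(vec![("col_b", b)]).unwrap();

    // Passing these batches directly would now fail the schema check; merging
    // the schemas and padding missing columns with NULLs makes them printable
    // as a single table.
    let equalized = equalize_batch_schemas(vec![batch1, batch2]).unwrap();
    println!("{}", pretty_format_batches(&equalized).unwrap());
}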
004b401a05160ba0dc8f7c044cf973816fa59310
|
Marco Neumann
|
2023-07-19 14:18:57
|
upgrade to sqlx 0.7.1 (#8266)
|
There are a bunch of dependencies in `Cargo.lock` that are related to
mysql. These are NOT compiled at all, and are also not part of `cargo
tree`. The reason for the inclusion is a bug in cargo:
https://github.com/rust-lang/cargo/issues/10801
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
chore: upgrade to sqlx 0.7.1 (#8266)
There are a bunch of dependencies in `Cargo.lock` that are related to
mysql. These are NOT compiled at all, and are also not part of `cargo
tree`. The reason for the inclusion is a bug in cargo:
https://github.com/rust-lang/cargo/issues/10801
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/Cargo.lock b/Cargo.lock
index 3c0ad750eb..0c71e7f51f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -17,17 +17,6 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
-[[package]]
-name = "ahash"
-version = "0.7.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
-dependencies = [
- "getrandom",
- "once_cell",
- "version_check",
-]
-
[[package]]
name = "ahash"
version = "0.8.3"
@@ -165,7 +154,7 @@ version = "43.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2feeebd77b34b0bc88f224e06d01c27da4733997cc4789a4e056196656cdc59a"
dependencies = [
- "ahash 0.8.3",
+ "ahash",
"arrow-arith",
"arrow-array",
"arrow-buffer",
@@ -202,7 +191,7 @@ version = "43.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "63d7ea725f7d1f8bb2cffc53ef538557e95fc802e217d5be25122d402e22f3d0"
dependencies = [
- "ahash 0.8.3",
+ "ahash",
"arrow-buffer",
"arrow-data",
"arrow-schema",
@@ -354,7 +343,7 @@ version = "43.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a75a4a757afc301ce010adadff54d79d66140c4282ed3de565f6ccb716a5cf3"
dependencies = [
- "ahash 0.8.3",
+ "ahash",
"arrow-array",
"arrow-buffer",
"arrow-data",
@@ -402,7 +391,7 @@ dependencies = [
name = "arrow_util"
version = "0.1.0"
dependencies = [
- "ahash 0.8.3",
+ "ahash",
"arrow",
"chrono",
"comfy-table",
@@ -512,9 +501,9 @@ dependencies = [
[[package]]
name = "atoi"
-version = "1.0.0"
+version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7c57d12312ff59c811c0643f4d80830505833c9ffaebd193d819392b265be8e"
+checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528"
dependencies = [
"num-traits",
]
@@ -531,7 +520,7 @@ dependencies = [
"iox_time",
"metric",
"observability_deps",
- "parking_lot 0.12.1",
+ "parking_lot",
"paste",
"snafu",
"test_helpers_end_to_end",
@@ -629,6 +618,12 @@ version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d"
+[[package]]
+name = "base64ct"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
+
[[package]]
name = "bitflags"
version = "1.3.2"
@@ -640,6 +635,9 @@ name = "bitflags"
version = "2.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42"
+dependencies = [
+ "serde",
+]
[[package]]
name = "blake2"
@@ -762,7 +760,7 @@ dependencies = [
"iox_time",
"metric",
"observability_deps",
- "parking_lot 0.12.1",
+ "parking_lot",
"pdatastructs",
"proptest",
"rand",
@@ -1085,7 +1083,7 @@ dependencies = [
"futures",
"hdrhistogram",
"humantime",
- "parking_lot 0.12.1",
+ "parking_lot",
"prost-types",
"serde",
"serde_json",
@@ -1098,6 +1096,12 @@ dependencies = [
"tracing-subscriber",
]
+[[package]]
+name = "const-oid"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "795bc6e66a8e340f075fcf6227e417a2dc976b92b91f3cdc778bb858778b6747"
+
[[package]]
name = "const-random"
version = "0.1.15"
@@ -1330,7 +1334,7 @@ dependencies = [
"hashbrown 0.12.3",
"lock_api",
"once_cell",
- "parking_lot_core 0.9.8",
+ "parking_lot_core",
]
[[package]]
@@ -1364,7 +1368,7 @@ name = "datafusion"
version = "27.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=46182c894e5106adba7fb53e9848ce666fb6129b#46182c894e5106adba7fb53e9848ce666fb6129b"
dependencies = [
- "ahash 0.8.3",
+ "ahash",
"arrow",
"arrow-array",
"arrow-schema",
@@ -1391,7 +1395,7 @@ dependencies = [
"log",
"num_cpus",
"object_store",
- "parking_lot 0.12.1",
+ "parking_lot",
"parquet",
"percent-encoding",
"pin-project-lite",
@@ -1432,7 +1436,7 @@ dependencies = [
"hashbrown 0.14.0",
"log",
"object_store",
- "parking_lot 0.12.1",
+ "parking_lot",
"rand",
"tempfile",
"url",
@@ -1443,7 +1447,7 @@ name = "datafusion-expr"
version = "27.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=46182c894e5106adba7fb53e9848ce666fb6129b#46182c894e5106adba7fb53e9848ce666fb6129b"
dependencies = [
- "ahash 0.8.3",
+ "ahash",
"arrow",
"datafusion-common",
"lazy_static",
@@ -1474,7 +1478,7 @@ name = "datafusion-physical-expr"
version = "27.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=46182c894e5106adba7fb53e9848ce666fb6129b#46182c894e5106adba7fb53e9848ce666fb6129b"
dependencies = [
- "ahash 0.8.3",
+ "ahash",
"arrow",
"arrow-array",
"arrow-buffer",
@@ -1567,6 +1571,17 @@ dependencies = [
"uuid",
]
+[[package]]
+name = "der"
+version = "0.7.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c7ed52955ce76b1554f509074bb357d3fb8ac9b51288a65a3fd480d1dfba946"
+dependencies = [
+ "const-oid",
+ "pem-rfc7468",
+ "zeroize",
+]
+
[[package]]
name = "diff"
version = "0.1.13"
@@ -1586,30 +1601,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer",
+ "const-oid",
"crypto-common",
"subtle",
]
-[[package]]
-name = "dirs"
-version = "4.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059"
-dependencies = [
- "dirs-sys",
-]
-
-[[package]]
-name = "dirs-sys"
-version = "0.3.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6"
-dependencies = [
- "libc",
- "redox_users",
- "winapi",
-]
-
[[package]]
name = "dml"
version = "0.1.0"
@@ -1641,6 +1637,9 @@ name = "either"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91"
+dependencies = [
+ "serde",
+]
[[package]]
name = "encode_unicode"
@@ -1694,6 +1693,17 @@ dependencies = [
"str-buf",
]
+[[package]]
+name = "etcetera"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943"
+dependencies = [
+ "cfg-if",
+ "home",
+ "windows-sys 0.48.0",
+]
+
[[package]]
name = "event-listener"
version = "2.5.3"
@@ -1709,7 +1719,7 @@ dependencies = [
"metric",
"observability_deps",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"pin-project",
"tokio",
"tokio-util",
@@ -1884,13 +1894,13 @@ dependencies = [
[[package]]
name = "futures-intrusive"
-version = "0.4.2"
+version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a604f7a68fbf8103337523b1fadc8ade7361ee3f112f7c680ad179651616aed5"
+checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f"
dependencies = [
"futures-core",
"lock_api",
- "parking_lot 0.11.2",
+ "parking_lot",
]
[[package]]
@@ -2148,7 +2158,7 @@ version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"
dependencies = [
- "ahash 0.8.3",
+ "ahash",
"allocator-api2",
]
@@ -2315,9 +2325,9 @@ checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7"
dependencies = [
"http",
"hyper",
- "rustls 0.21.5",
+ "rustls",
"tokio",
- "tokio-rustls 0.24.1",
+ "tokio-rustls",
]
[[package]]
@@ -2412,7 +2422,7 @@ version = "0.11.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fb7c1b80a1dfa604bb4a649a5c5aeef3d913f7c520cb42b40e534e8a61bcdfc"
dependencies = [
- "ahash 0.8.3",
+ "ahash",
"indexmap 1.9.3",
"is-terminal",
"itoa",
@@ -2444,7 +2454,7 @@ dependencies = [
"futures",
"mockito",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"reqwest",
"serde",
"serde_json",
@@ -2529,7 +2539,7 @@ dependencies = [
"observability_deps",
"once_cell",
"panic_logging",
- "parking_lot 0.12.1",
+ "parking_lot",
"parquet_file",
"parquet_to_line_protocol",
"predicate",
@@ -2660,7 +2670,7 @@ dependencies = [
"object_store",
"observability_deps",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"parquet_file",
"paste",
"pin-project",
@@ -2799,7 +2809,7 @@ dependencies = [
"mutable_batch",
"mutable_batch_lp",
"observability_deps",
- "parking_lot 0.12.1",
+ "parking_lot",
"paste",
"pretty_assertions",
"rand",
@@ -2869,7 +2879,7 @@ dependencies = [
"object_store",
"observability_deps",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"parquet_file",
"predicate",
"query_functions",
@@ -2959,7 +2969,7 @@ name = "iox_time"
version = "0.1.0"
dependencies = [
"chrono",
- "parking_lot 0.12.1",
+ "parking_lot",
"tokio",
"workspace-hack",
]
@@ -2984,7 +2994,7 @@ dependencies = [
"metric",
"metric_exporters",
"observability_deps",
- "parking_lot 0.12.1",
+ "parking_lot",
"pprof",
"reqwest",
"serde",
@@ -3203,6 +3213,9 @@ name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+dependencies = [
+ "spin 0.5.2",
+]
[[package]]
name = "lexical-core"
@@ -3282,9 +3295,9 @@ checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4"
[[package]]
name = "libsqlite3-sys"
-version = "0.24.2"
+version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "898745e570c7d0453cc1fbc4a701eb6c662ed54e8fec8b7d14be137ebeeb9d14"
+checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326"
dependencies = [
"cc",
"pkg-config",
@@ -3331,7 +3344,7 @@ version = "0.1.0"
dependencies = [
"observability_deps",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"regex",
"tracing-subscriber",
"workspace-hack",
@@ -3429,7 +3442,7 @@ dependencies = [
name = "metric"
version = "0.1.0"
dependencies = [
- "parking_lot 0.12.1",
+ "parking_lot",
"workspace-hack",
]
@@ -3655,6 +3668,23 @@ dependencies = [
"num-traits",
]
+[[package]]
+name = "num-bigint-dig"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151"
+dependencies = [
+ "byteorder",
+ "lazy_static",
+ "libm",
+ "num-integer",
+ "num-iter",
+ "num-traits",
+ "rand",
+ "smallvec",
+ "zeroize",
+]
+
[[package]]
name = "num-complex"
version = "0.4.3"
@@ -3750,7 +3780,7 @@ dependencies = [
"humantime",
"hyper",
"itertools 0.10.5",
- "parking_lot 0.12.1",
+ "parking_lot",
"percent-encoding",
"quick-xml 0.28.2",
"rand",
@@ -3796,7 +3826,7 @@ version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
dependencies = [
- "parking_lot_core 0.9.8",
+ "parking_lot_core",
]
[[package]]
@@ -3839,17 +3869,6 @@ dependencies = [
"workspace-hack",
]
-[[package]]
-name = "parking_lot"
-version = "0.11.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
-dependencies = [
- "instant",
- "lock_api",
- "parking_lot_core 0.8.6",
-]
-
[[package]]
name = "parking_lot"
version = "0.12.1"
@@ -3857,21 +3876,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
dependencies = [
"lock_api",
- "parking_lot_core 0.9.8",
-]
-
-[[package]]
-name = "parking_lot_core"
-version = "0.8.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
-dependencies = [
- "cfg-if",
- "instant",
- "libc",
- "redox_syscall 0.2.16",
- "smallvec",
- "winapi",
+ "parking_lot_core",
]
[[package]]
@@ -3893,7 +3898,7 @@ version = "43.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec7267a9607c3f955d4d0ac41b88a67cecc0d8d009173ad3da390699a6cb3750"
dependencies = [
- "ahash 0.8.3",
+ "ahash",
"arrow-array",
"arrow-buffer",
"arrow-cast",
@@ -4030,6 +4035,15 @@ dependencies = [
"fixedbitset",
]
+[[package]]
+name = "pem-rfc7468"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412"
+dependencies = [
+ "base64ct",
+]
+
[[package]]
name = "percent-encoding"
version = "2.3.0"
@@ -4160,6 +4174,27 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
+[[package]]
+name = "pkcs1"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f"
+dependencies = [
+ "der",
+ "pkcs8",
+ "spki",
+]
+
+[[package]]
+name = "pkcs8"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7"
+dependencies = [
+ "der",
+ "spki",
+]
+
[[package]]
name = "pkg-config"
version = "0.3.27"
@@ -4180,7 +4215,7 @@ dependencies = [
"log",
"nix",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"prost",
"prost-build",
"prost-derive",
@@ -4293,7 +4328,7 @@ dependencies = [
"fnv",
"lazy_static",
"memchr",
- "parking_lot 0.12.1",
+ "parking_lot",
"thiserror",
]
@@ -4405,7 +4440,7 @@ dependencies = [
"object_store",
"object_store_metrics",
"observability_deps",
- "parking_lot 0.12.1",
+ "parking_lot",
"parquet_file",
"pin-project",
"predicate",
@@ -4550,17 +4585,6 @@ dependencies = [
"bitflags 1.3.2",
]
-[[package]]
-name = "redox_users"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b"
-dependencies = [
- "getrandom",
- "redox_syscall 0.2.16",
- "thiserror",
-]
-
[[package]]
name = "regex"
version = "1.9.1"
@@ -4628,13 +4652,13 @@ dependencies = [
"once_cell",
"percent-encoding",
"pin-project-lite",
- "rustls 0.21.5",
+ "rustls",
"rustls-pemfile",
"serde",
"serde_json",
"serde_urlencoded",
"tokio",
- "tokio-rustls 0.24.1",
+ "tokio-rustls",
"tokio-util",
"tower-service",
"url",
@@ -4700,7 +4724,7 @@ dependencies = [
"object_store",
"observability_deps",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"paste",
"pretty_assertions",
"proptest",
@@ -4725,6 +4749,28 @@ dependencies = [
"workspace-hack",
]
+[[package]]
+name = "rsa"
+version = "0.9.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ab43bb47d23c1a631b4b680199a45255dce26fa9ab2fa902581f624ff13e6a8"
+dependencies = [
+ "byteorder",
+ "const-oid",
+ "digest",
+ "num-bigint-dig",
+ "num-integer",
+ "num-iter",
+ "num-traits",
+ "pkcs1",
+ "pkcs8",
+ "rand_core",
+ "signature",
+ "spki",
+ "subtle",
+ "zeroize",
+]
+
[[package]]
name = "rustc-demangle"
version = "0.1.23"
@@ -4767,18 +4813,6 @@ dependencies = [
"windows-sys 0.48.0",
]
-[[package]]
-name = "rustls"
-version = "0.20.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f"
-dependencies = [
- "log",
- "ring",
- "sct",
- "webpki",
-]
-
[[package]]
name = "rustls"
version = "0.21.5"
@@ -4966,7 +5000,7 @@ dependencies = [
"iox_query_influxql",
"iox_query_influxrpc",
"metric",
- "parking_lot 0.12.1",
+ "parking_lot",
"predicate",
"tonic",
"trace",
@@ -5036,7 +5070,7 @@ dependencies = [
"metric",
"observability_deps",
"panic_logging",
- "parking_lot 0.12.1",
+ "parking_lot",
"pin-project",
"predicate",
"prost",
@@ -5171,7 +5205,7 @@ dependencies = [
"hashbrown 0.14.0",
"mutable_batch",
"mutable_batch_lp",
- "parking_lot 0.12.1",
+ "parking_lot",
"rand",
"siphasher",
"workspace-hack",
@@ -5186,6 +5220,16 @@ dependencies = [
"libc",
]
+[[package]]
+name = "signature"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500"
+dependencies = [
+ "digest",
+ "rand_core",
+]
+
[[package]]
name = "similar"
version = "2.2.1"
@@ -5266,6 +5310,16 @@ dependencies = [
"lock_api",
]
+[[package]]
+name = "spki"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a"
+dependencies = [
+ "base64ct",
+ "der",
+]
+
[[package]]
name = "sqlformat"
version = "0.2.1"
@@ -5300,69 +5354,59 @@ dependencies = [
[[package]]
name = "sqlx"
-version = "0.6.3"
+version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f8de3b03a925878ed54a954f621e64bf55a3c1bd29652d0d1a17830405350188"
+checksum = "8e58421b6bc416714d5115a2ca953718f6c621a51b68e4f4922aea5a4391a721"
dependencies = [
"sqlx-core",
"sqlx-macros",
+ "sqlx-mysql",
+ "sqlx-postgres",
+ "sqlx-sqlite",
]
[[package]]
name = "sqlx-core"
-version = "0.6.3"
+version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa8241483a83a3f33aa5fff7e7d9def398ff9990b2752b6c6112b83c6d246029"
+checksum = "dd4cef4251aabbae751a3710927945901ee1d97ee96d757f6880ebb9a79bfd53"
dependencies = [
- "ahash 0.7.6",
+ "ahash",
"atoi",
- "base64 0.13.1",
- "bitflags 1.3.2",
"byteorder",
"bytes",
"crc",
"crossbeam-queue",
- "dirs",
"dotenvy",
"either",
"event-listener",
- "flume",
"futures-channel",
"futures-core",
- "futures-executor",
"futures-intrusive",
+ "futures-io",
"futures-util",
"hashlink",
"hex",
- "hkdf",
- "hmac",
- "indexmap 1.9.3",
- "itoa",
- "libc",
- "libsqlite3-sys",
+ "indexmap 2.0.0",
"log",
- "md-5",
"memchr",
"once_cell",
"paste",
"percent-encoding",
- "rand",
- "rustls 0.20.8",
+ "rustls",
"rustls-pemfile",
"serde",
"serde_json",
- "sha1",
"sha2",
"smallvec",
"sqlformat",
- "sqlx-rt",
- "stringprep",
"thiserror",
+ "tokio",
"tokio-stream",
+ "tracing",
"url",
"uuid",
- "webpki-roots 0.22.6",
- "whoami",
+ "webpki-roots 0.24.0",
]
[[package]]
@@ -5380,33 +5424,147 @@ dependencies = [
[[package]]
name = "sqlx-macros"
-version = "0.6.3"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "208e3165167afd7f3881b16c1ef3f2af69fa75980897aac8874a0696516d12c2"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "sqlx-core",
+ "sqlx-macros-core",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "sqlx-macros-core"
+version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9966e64ae989e7e575b19d7265cb79d7fc3cbbdf179835cb0d716f294c2049c9"
+checksum = "8a4a8336d278c62231d87f24e8a7a74898156e34c1c18942857be2acb29c7dfc"
dependencies = [
"dotenvy",
"either",
"heck",
+ "hex",
"once_cell",
"proc-macro2",
"quote",
+ "serde",
"serde_json",
"sha2",
"sqlx-core",
- "sqlx-rt",
+ "sqlx-mysql",
+ "sqlx-postgres",
+ "sqlx-sqlite",
"syn 1.0.109",
+ "tempfile",
+ "tokio",
"url",
]
[[package]]
-name = "sqlx-rt"
-version = "0.6.3"
+name = "sqlx-mysql"
+version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "804d3f245f894e61b1e6263c84b23ca675d96753b5abfd5cc8597d86806e8024"
+checksum = "8ca69bf415b93b60b80dc8fda3cb4ef52b2336614d8da2de5456cc942a110482"
dependencies = [
+ "atoi",
+ "base64 0.21.2",
+ "bitflags 2.3.3",
+ "byteorder",
+ "bytes",
+ "crc",
+ "digest",
+ "dotenvy",
+ "either",
+ "futures-channel",
+ "futures-core",
+ "futures-io",
+ "futures-util",
+ "generic-array",
+ "hex",
+ "hkdf",
+ "hmac",
+ "itoa",
+ "log",
+ "md-5",
+ "memchr",
"once_cell",
- "tokio",
- "tokio-rustls 0.23.4",
+ "percent-encoding",
+ "rand",
+ "rsa",
+ "serde",
+ "sha1",
+ "sha2",
+ "smallvec",
+ "sqlx-core",
+ "stringprep",
+ "thiserror",
+ "tracing",
+ "uuid",
+ "whoami",
+]
+
+[[package]]
+name = "sqlx-postgres"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0db2df1b8731c3651e204629dd55e52adbae0462fa1bdcbed56a2302c18181e"
+dependencies = [
+ "atoi",
+ "base64 0.21.2",
+ "bitflags 2.3.3",
+ "byteorder",
+ "crc",
+ "dotenvy",
+ "etcetera",
+ "futures-channel",
+ "futures-core",
+ "futures-io",
+ "futures-util",
+ "hex",
+ "hkdf",
+ "hmac",
+ "home",
+ "itoa",
+ "log",
+ "md-5",
+ "memchr",
+ "once_cell",
+ "rand",
+ "serde",
+ "serde_json",
+ "sha1",
+ "sha2",
+ "smallvec",
+ "sqlx-core",
+ "stringprep",
+ "thiserror",
+ "tracing",
+ "uuid",
+ "whoami",
+]
+
+[[package]]
+name = "sqlx-sqlite"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be4c21bf34c7cae5b283efb3ac1bcc7670df7561124dc2f8bdc0b59be40f79a2"
+dependencies = [
+ "atoi",
+ "flume",
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-intrusive",
+ "futures-util",
+ "libsqlite3-sys",
+ "log",
+ "percent-encoding",
+ "serde",
+ "sqlx-core",
+ "tracing",
+ "url",
+ "uuid",
]
[[package]]
@@ -5595,7 +5753,7 @@ dependencies = [
"async-trait",
"dotenvy",
"observability_deps",
- "parking_lot 0.12.1",
+ "parking_lot",
"tempfile",
"tokio",
"tracing-log",
@@ -5626,7 +5784,7 @@ dependencies = [
"nix",
"observability_deps",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"prost",
"rand",
"regex",
@@ -5760,7 +5918,7 @@ dependencies = [
"libc",
"mio",
"num_cpus",
- "parking_lot 0.12.1",
+ "parking_lot",
"pin-project-lite",
"signal-hook-registry",
"socket2",
@@ -5790,24 +5948,13 @@ dependencies = [
"syn 2.0.26",
]
-[[package]]
-name = "tokio-rustls"
-version = "0.23.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59"
-dependencies = [
- "rustls 0.20.8",
- "tokio",
- "webpki",
-]
-
[[package]]
name = "tokio-rustls"
version = "0.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081"
dependencies = [
- "rustls 0.21.5",
+ "rustls",
"tokio",
]
@@ -5842,7 +5989,7 @@ name = "tokio_metrics_bridge"
version = "0.1.0"
dependencies = [
"metric",
- "parking_lot 0.12.1",
+ "parking_lot",
"tokio",
"workspace-hack",
]
@@ -5904,7 +6051,7 @@ dependencies = [
"prost",
"rustls-pemfile",
"tokio",
- "tokio-rustls 0.24.1",
+ "tokio-rustls",
"tokio-stream",
"tower",
"tower-layer",
@@ -6009,7 +6156,7 @@ version = "0.1.0"
dependencies = [
"chrono",
"observability_deps",
- "parking_lot 0.12.1",
+ "parking_lot",
"rand",
"workspace-hack",
]
@@ -6042,7 +6189,7 @@ dependencies = [
"itertools 0.11.0",
"metric",
"observability_deps",
- "parking_lot 0.12.1",
+ "parking_lot",
"pin-project",
"snafu",
"tower",
@@ -6114,7 +6261,7 @@ dependencies = [
"matchers",
"nu-ansi-term 0.46.0",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"regex",
"serde",
"serde_json",
@@ -6137,7 +6284,7 @@ dependencies = [
"lock_api",
"metric",
"observability_deps",
- "parking_lot 0.12.1",
+ "parking_lot",
"pin-project",
"sysinfo",
"tempfile",
@@ -6309,7 +6456,7 @@ dependencies = [
"mutable_batch_lp",
"mutable_batch_pb",
"observability_deps",
- "parking_lot 0.12.1",
+ "parking_lot",
"prost",
"snafu",
"snap",
@@ -6480,6 +6627,15 @@ dependencies = [
"rustls-webpki 0.100.1",
]
+[[package]]
+name = "webpki-roots"
+version = "0.24.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b291546d5d9d1eab74f069c77749f2cb8504a12caa20f0f2de93ddbf6f411888"
+dependencies = [
+ "rustls-webpki 0.101.1",
+]
+
[[package]]
name = "which"
version = "4.4.0"
@@ -6496,10 +6652,6 @@ name = "whoami"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50"
-dependencies = [
- "wasm-bindgen",
- "web-sys",
-]
[[package]]
name = "winapi"
@@ -6695,15 +6847,14 @@ dependencies = [
name = "workspace-hack"
version = "0.1.0"
dependencies = [
- "ahash 0.8.3",
+ "ahash",
"arrow",
"arrow-array",
"arrow-flight",
"arrow-ord",
"arrow-string",
- "base64 0.13.1",
"base64 0.21.2",
- "bitflags 1.3.2",
+ "bitflags 2.3.3",
"byteorder",
"bytes",
"cc",
@@ -6728,7 +6879,6 @@ dependencies = [
"getrandom",
"hashbrown 0.14.0",
"heck",
- "indexmap 1.9.3",
"indexmap 2.0.0",
"itertools 0.10.5",
"libc",
@@ -6741,7 +6891,7 @@ dependencies = [
"num-traits",
"object_store",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"parquet",
"petgraph",
"phf_shared",
@@ -6757,7 +6907,7 @@ dependencies = [
"reqwest",
"ring",
"rustix 0.38.4",
- "rustls 0.21.5",
+ "rustls",
"scopeguard",
"serde",
"serde_json",
@@ -6768,6 +6918,9 @@ dependencies = [
"sqlx",
"sqlx-core",
"sqlx-macros",
+ "sqlx-macros-core",
+ "sqlx-postgres",
+ "sqlx-sqlite",
"syn 1.0.109",
"syn 2.0.26",
"thrift",
@@ -6784,7 +6937,6 @@ dependencies = [
"unicode-normalization",
"url",
"uuid",
- "webpki",
"winapi",
"windows-sys 0.48.0",
"zstd",
@@ -6816,6 +6968,12 @@ version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"
+[[package]]
+name = "zeroize"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
+
[[package]]
name = "zstd"
version = "0.12.3+zstd.1.5.2"
diff --git a/data_types/Cargo.toml b/data_types/Cargo.toml
index 12e26e19a5..4c11fa539e 100644
--- a/data_types/Cargo.toml
+++ b/data_types/Cargo.toml
@@ -17,7 +17,7 @@ once_cell = "1"
ordered-float = "3"
schema = { path = "../schema" }
sha2 = "0.10"
-sqlx = { version = "0.6", features = ["runtime-tokio-rustls", "postgres", "uuid"] }
+sqlx = { version = "0.7.1", features = ["runtime-tokio-rustls", "postgres", "uuid"] }
thiserror = "1.0.43"
uuid = { version = "1", features = ["v4"] }
workspace-hack = { version = "0.1", path = "../workspace-hack" }
diff --git a/data_types/src/columns.rs b/data_types/src/columns.rs
index cada47ffeb..c61cca7238 100644
--- a/data_types/src/columns.rs
+++ b/data_types/src/columns.rs
@@ -4,7 +4,6 @@ use super::TableId;
use generated_types::influxdata::iox::schema::v1 as proto;
use influxdb_line_protocol::FieldValue;
use schema::{builder::SchemaBuilder, InfluxColumnType, InfluxFieldType, Schema};
-use sqlx::postgres::PgHasArrayType;
use std::{
collections::{BTreeMap, BTreeSet, HashMap},
convert::TryFrom,
@@ -26,12 +25,6 @@ impl ColumnId {
}
}
-impl PgHasArrayType for ColumnId {
- fn array_type_info() -> sqlx::postgres::PgTypeInfo {
- <i64 as PgHasArrayType>::array_type_info()
- }
-}
-
/// Column definitions for a table indexed by their name
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct ColumnsByName(BTreeMap<String, ColumnSchema>);
@@ -328,7 +321,7 @@ impl TryFrom<proto::column_schema::ColumnType> for ColumnType {
/// Set of columns.
#[derive(Debug, Clone, PartialEq, Eq, sqlx::Type)]
-#[sqlx(transparent)]
+#[sqlx(transparent, no_pg_array)]
pub struct ColumnSet(Vec<ColumnId>);
impl ColumnSet {
diff --git a/data_types/src/partition_template.rs b/data_types/src/partition_template.rs
index 1f4457869a..84eb4ea5fd 100644
--- a/data_types/src/partition_template.rs
+++ b/data_types/src/partition_template.rs
@@ -244,7 +244,7 @@ pub static PARTITION_BY_DAY_PROTO: Lazy<Arc<proto::PartitionTemplate>> = Lazy::n
/// A partition template specified by a namespace record.
#[derive(Debug, PartialEq, Clone, Default, sqlx::Type)]
-#[sqlx(transparent)]
+#[sqlx(transparent, no_pg_array)]
pub struct NamespacePartitionTemplateOverride(Option<serialization::Wrapper>);
impl TryFrom<proto::PartitionTemplate> for NamespacePartitionTemplateOverride {
@@ -259,7 +259,7 @@ impl TryFrom<proto::PartitionTemplate> for NamespacePartitionTemplateOverride {
/// A partition template specified by a table record.
#[derive(Debug, PartialEq, Eq, Clone, Default, sqlx::Type)]
-#[sqlx(transparent)]
+#[sqlx(transparent, no_pg_array)]
pub struct TablePartitionTemplateOverride(Option<serialization::Wrapper>);
impl TablePartitionTemplateOverride {
diff --git a/deny.toml b/deny.toml
index 2df6d73d23..b97e99ca53 100644
--- a/deny.toml
+++ b/deny.toml
@@ -7,14 +7,6 @@ yanked = "deny"
unmaintained = "warn"
notice = "warn"
ignore = [
- # "It was sometimes possible for SQLite versions >= 1.0.12, < 3.39.2 to allow an array-bounds overflow when large
- # string were input into SQLite's printf function."
- #
- # We are not using `printf` with untrusted inputs.
- #
- # This is currently blocked by upstream:
- # https://github.com/launchbadge/sqlx/issues/2346
- "RUSTSEC-2022-0090",
]
git-fetch-with-cli = true
diff --git a/garbage_collector/Cargo.toml b/garbage_collector/Cargo.toml
index 2a95937159..3b83f6f7c7 100644
--- a/garbage_collector/Cargo.toml
+++ b/garbage_collector/Cargo.toml
@@ -32,5 +32,5 @@ metric = { path = "../metric" }
once_cell = { version = "1.18", features = ["parking_lot"] }
parquet_file = { path = "../parquet_file" }
tempfile = "3"
-sqlx = { version = "0.6", features = [ "runtime-tokio-rustls" ] }
+sqlx = { version = "0.7.1", features = [ "runtime-tokio-rustls" ] }
diff --git a/iox_catalog/Cargo.toml b/iox_catalog/Cargo.toml
index ea3a878afd..3730dd2bba 100644
--- a/iox_catalog/Cargo.toml
+++ b/iox_catalog/Cargo.toml
@@ -18,7 +18,7 @@ parking_lot = { version = "0.12" }
serde = { version = "1.0", features = ["derive"] }
siphasher = "0.3"
snafu = "0.7"
-sqlx = { version = "0.6", features = [ "runtime-tokio-rustls" , "postgres", "uuid", "sqlite" ] }
+sqlx = { version = "0.7.1", features = [ "runtime-tokio-rustls" , "postgres", "uuid", "sqlite" ] }
sqlx-hotswap-pool = { path = "../sqlx-hotswap-pool" }
thiserror = "1.0.43"
tokio = { version = "1.29", features = ["io-util", "macros", "parking_lot", "rt-multi-thread", "time"] }
diff --git a/iox_catalog/src/postgres.rs b/iox_catalog/src/postgres.rs
index 846aaf7975..c567cf71e5 100644
--- a/iox_catalog/src/postgres.rs
+++ b/iox_catalog/src/postgres.rs
@@ -329,9 +329,9 @@ async fn new_raw_pool(
parsed_dsn: &str,
) -> Result<sqlx::Pool<Postgres>, sqlx::Error> {
// sqlx exposes some options as pool options, while other options are available as connection options.
- let mut connect_options = PgConnectOptions::from_str(parsed_dsn)?;
- // the default is INFO, which is frankly surprising.
- connect_options.log_statements(log::LevelFilter::Trace);
+ let connect_options = PgConnectOptions::from_str(parsed_dsn)?
+ // the default is INFO, which is frankly surprising.
+ .log_statements(log::LevelFilter::Trace);
let app_name = options.app_name.clone();
let app_name2 = options.app_name.clone(); // just to log below
@@ -816,7 +816,7 @@ RETURNING *;
.bind(name) // $1
.bind(partition_template) // $2
.bind(namespace_id) // $3
- .fetch_one(&mut tx)
+ .fetch_one(&mut *tx)
.await
.map_err(|e| match e {
sqlx::Error::RowNotFound => Error::TableCreateLimitError {
@@ -843,7 +843,8 @@ RETURNING *;
// columns with an unsupported type.
for template_part in table.partition_template.parts() {
if let TemplatePart::TagValue(tag_name) = template_part {
- insert_column_with_connection(&mut tx, tag_name, table.id, ColumnType::Tag).await?;
+ insert_column_with_connection(&mut *tx, tag_name, table.id, ColumnType::Tag)
+ .await?;
}
}
@@ -1538,15 +1539,14 @@ WHERE object_store_id = $1;
) -> Result<Vec<Uuid>> {
sqlx::query(
// sqlx's readme suggests using PG's ANY operator instead of IN; see link below.
+ // https://github.com/launchbadge/sqlx/blob/main/FAQ.md#how-can-i-do-a-select--where-foo-in--query
r#"
SELECT object_store_id
FROM parquet_file
WHERE object_store_id = ANY($1);
"#,
)
- // from https://github.com/launchbadge/sqlx/blob/main/FAQ.md#how-can-i-do-a-select--where-foo-in--query
- // a bug of the parameter typechecking code requires all array parameters to be slices
- .bind(&object_store_ids[..]) // $1
+ .bind(object_store_ids) // $1
.map(|pgr| pgr.get::<Uuid, _>("object_store_id"))
.fetch_all(&mut self.inner)
.await
@@ -1576,13 +1576,13 @@ WHERE object_store_id = ANY($1);
.map_err(|e| Error::StartTransaction { source: e })?;
let marked_at = Timestamp::from(self.time_provider.now());
- flag_for_delete(&mut tx, delete, marked_at).await?;
+ flag_for_delete(&mut *tx, delete, marked_at).await?;
- update_compaction_level(&mut tx, upgrade, target_level).await?;
+ update_compaction_level(&mut *tx, upgrade, target_level).await?;
let mut ids = Vec::with_capacity(create.len());
for file in create {
- let id = create_parquet_file(&mut tx, file).await?;
+ let id = create_parquet_file(&mut *tx, file).await?;
ids.push(id);
}
@@ -1667,12 +1667,9 @@ async fn flag_for_delete<'q, E>(
where
E: Executor<'q, Database = Postgres>,
{
- // If I try to do `.bind(parquet_file_ids)` directly, I get a compile error from sqlx.
- // See https://github.com/launchbadge/sqlx/issues/1744
- let ids: Vec<_> = ids.iter().map(|p| p.get()).collect();
let query = sqlx::query(r#"UPDATE parquet_file SET to_delete = $1 WHERE id = ANY($2);"#)
.bind(marked_at) // $1
- .bind(&ids[..]); // $2
+ .bind(ids); // $2
query
.execute(executor)
.await
@@ -1689,9 +1686,6 @@ async fn update_compaction_level<'q, E>(
where
E: Executor<'q, Database = Postgres>,
{
- // If I try to do `.bind(parquet_file_ids)` directly, I get a compile error from sqlx.
- // See https://github.com/launchbadge/sqlx/issues/1744
- let ids: Vec<_> = parquet_file_ids.iter().map(|p| p.get()).collect();
let query = sqlx::query(
r#"
UPDATE parquet_file
@@ -1700,7 +1694,7 @@ WHERE id = ANY($2);
"#,
)
.bind(compaction_level) // $1
- .bind(&ids[..]); // $2
+ .bind(parquet_file_ids); // $2
query
.execute(executor)
.await
diff --git a/iox_catalog/src/sqlite.rs b/iox_catalog/src/sqlite.rs
index c35a9ef4b2..0c56d9e125 100644
--- a/iox_catalog/src/sqlite.rs
+++ b/iox_catalog/src/sqlite.rs
@@ -577,7 +577,7 @@ RETURNING *;
.bind(name) // $1
.bind(partition_template) // $2
.bind(namespace_id) // $3
- .fetch_one(&mut tx)
+ .fetch_one(&mut *tx)
.await
.map_err(|e| match e {
sqlx::Error::RowNotFound => Error::TableCreateLimitError {
@@ -604,7 +604,8 @@ RETURNING *;
// columns with an unsupported type.
for template_part in table.partition_template.parts() {
if let TemplatePart::TagValue(tag_name) = template_part {
- insert_column_with_connection(&mut tx, tag_name, table.id, ColumnType::Tag).await?;
+ insert_column_with_connection(&mut *tx, tag_name, table.id, ColumnType::Tag)
+ .await?;
}
}
@@ -1451,14 +1452,14 @@ WHERE object_store_id IN ({v});",
for id in delete {
let marked_at = Timestamp::from(self.time_provider.now());
- flag_for_delete(&mut tx, *id, marked_at).await?;
+ flag_for_delete(&mut *tx, *id, marked_at).await?;
}
- update_compaction_level(&mut tx, upgrade, target_level).await?;
+ update_compaction_level(&mut *tx, upgrade, target_level).await?;
let mut ids = Vec::with_capacity(create.len());
for file in create {
- let res = create_parquet_file(&mut tx, file.clone()).await?;
+ let res = create_parquet_file(&mut *tx, file.clone()).await?;
ids.push(res.id);
}
tx.commit()
@@ -1562,8 +1563,7 @@ async fn update_compaction_level<'q, E>(
where
E: Executor<'q, Database = Sqlite>,
{
- // If I try to do `.bind(parquet_file_ids)` directly, I get a compile error from sqlx.
- // See https://github.com/launchbadge/sqlx/issues/1744
+ // We use a JSON-based "IS IN" check.
let ids: Vec<_> = parquet_file_ids.iter().map(|p| p.get()).collect();
let query = sqlx::query(
r#"
diff --git a/sqlx-hotswap-pool/Cargo.toml b/sqlx-hotswap-pool/Cargo.toml
index 893bdeab81..8fe071fd7f 100644
--- a/sqlx-hotswap-pool/Cargo.toml
+++ b/sqlx-hotswap-pool/Cargo.toml
@@ -10,7 +10,7 @@ license.workspace = true
publish = false
[dependencies]
-sqlx = { version = "0.6.3", features = ["runtime-tokio-rustls", "postgres", "json", "tls"] }
+sqlx = { version = "0.7.1", features = ["runtime-tokio-rustls", "postgres", "json", "tls-rustls"] }
either = "1.8.1"
futures = "0.3"
workspace-hack = { version = "0.1", path = "../workspace-hack" }
diff --git a/test_helpers_end_to_end/Cargo.toml b/test_helpers_end_to_end/Cargo.toml
index 6178049d10..458847abc6 100644
--- a/test_helpers_end_to_end/Cargo.toml
+++ b/test_helpers_end_to_end/Cargo.toml
@@ -31,7 +31,7 @@ rand = "0.8.3"
regex = "1.9"
reqwest = { version = "0.11", default-features = false, features = ["json", "rustls-tls"] }
snafu = "0.7"
-sqlx = { version = "0.6", features = [ "runtime-tokio-rustls" , "postgres", "uuid" ] }
+sqlx = { version = "0.7.1", features = [ "runtime-tokio-rustls" , "postgres", "uuid" ] }
tempfile = "3.6.0"
test_helpers = { path = "../test_helpers", features = ["future_timeout"] }
tokio = { version = "1.29", features = ["macros", "net", "parking_lot", "rt-multi-thread", "signal", "sync", "time"] }
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index e8f4472e75..33d48d78be 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -16,15 +16,13 @@ license.workspace = true
### BEGIN HAKARI SECTION
[dependencies]
-ahash = { version = "0.8", default-features = false, features = ["runtime-rng"] }
+ahash = { version = "0.8" }
arrow = { version = "43", features = ["dyn_cmp_dict", "prettyprint"] }
arrow-array = { version = "43", default-features = false, features = ["chrono-tz"] }
arrow-flight = { version = "43", features = ["flight-sql-experimental"] }
arrow-ord = { version = "43", default-features = false, features = ["dyn_cmp_dict"] }
arrow-string = { version = "43", default-features = false, features = ["dyn_cmp_dict"] }
-base64-594e8ee84c453af0 = { package = "base64", version = "0.13" }
-base64-647d43efb71741da = { package = "base64", version = "0.21" }
-bitflags = { version = "1" }
+base64 = { version = "0.21" }
byteorder = { version = "1" }
bytes = { version = "1" }
chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "serde"] }
@@ -34,7 +32,7 @@ datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "46
datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "46182c894e5106adba7fb53e9848ce666fb6129b", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "46182c894e5106adba7fb53e9848ce666fb6129b", default-features = false, features = ["crypto_expressions", "encoding_expressions", "regex_expressions", "unicode_expressions"] }
digest = { version = "0.10", features = ["mac", "std"] }
-either = { version = "1" }
+either = { version = "1", features = ["serde"] }
fixedbitset = { version = "0.4" }
flatbuffers = { version = "23" }
flate2 = { version = "1" }
@@ -47,8 +45,7 @@ futures-task = { version = "0.3", default-features = false, features = ["std"] }
futures-util = { version = "0.3", features = ["channel", "io", "sink"] }
getrandom = { version = "0.2", default-features = false, features = ["std"] }
hashbrown = { version = "0.14", features = ["raw"] }
-indexmap-dff4ba8e3ae991db = { package = "indexmap", version = "1", default-features = false, features = ["std"] }
-indexmap-f595c2ba2a3f28df = { package = "indexmap", version = "2" }
+indexmap = { version = "2" }
itertools = { version = "0.10" }
libc = { version = "0.2", features = ["extra_traits"] }
lock_api = { version = "0.4", features = ["arc_lock"] }
@@ -74,14 +71,17 @@ regex-automata = { version = "0.3", default-features = false, features = ["dfa-o
regex-syntax = { version = "0.7" }
reqwest = { version = "0.11", default-features = false, features = ["json", "rustls-tls", "stream"] }
ring = { version = "0.16", features = ["std"] }
+rustls = { version = "0.21", default-features = false, features = ["dangerous_configuration", "logging", "tls12"] }
serde = { version = "1", features = ["derive", "rc"] }
serde_json = { version = "1", features = ["raw_value"] }
sha2 = { version = "0.10" }
similar = { version = "2", features = ["inline"] }
smallvec = { version = "1", default-features = false, features = ["union"] }
sqlparser = { version = "0.35", features = ["visitor"] }
-sqlx = { version = "0.6", features = ["json", "postgres", "runtime-tokio-rustls", "sqlite", "tls", "uuid"] }
-sqlx-core = { version = "0.6", default-features = false, features = ["any", "migrate", "postgres", "runtime-tokio-rustls", "sqlite", "uuid"] }
+sqlx = { version = "0.7", features = ["postgres", "runtime-tokio-rustls", "sqlite", "uuid"] }
+sqlx-core = { version = "0.7", features = ["_rt-tokio", "_tls-rustls", "any", "json", "migrate", "offline", "uuid"] }
+sqlx-postgres = { version = "0.7", default-features = false, features = ["any", "json", "migrate", "offline", "uuid"] }
+sqlx-sqlite = { version = "0.7", default-features = false, features = ["any", "json", "migrate", "offline", "uuid"] }
thrift = { version = "0.17" }
tokio = { version = "1", features = ["full", "test-util", "tracing"] }
tokio-stream = { version = "0.1", features = ["fs", "net"] }
@@ -101,17 +101,15 @@ zstd-safe = { version = "6", default-features = false, features = ["arrays", "le
zstd-sys = { version = "2", default-features = false, features = ["legacy", "std", "zdict_builder"] }
[build-dependencies]
-ahash = { version = "0.8", default-features = false, features = ["runtime-rng"] }
-base64-594e8ee84c453af0 = { package = "base64", version = "0.13" }
-base64-647d43efb71741da = { package = "base64", version = "0.21" }
-bitflags = { version = "1" }
+ahash = { version = "0.8" }
+base64 = { version = "0.21" }
byteorder = { version = "1" }
bytes = { version = "1" }
cc = { version = "1", default-features = false, features = ["parallel"] }
crossbeam-utils = { version = "0.8" }
crypto-common = { version = "0.1", default-features = false, features = ["std"] }
digest = { version = "0.10", features = ["mac", "std"] }
-either = { version = "1" }
+either = { version = "1", features = ["serde"] }
fixedbitset = { version = "0.4" }
futures-channel = { version = "0.3", features = ["sink"] }
futures-core = { version = "0.3" }
@@ -123,7 +121,7 @@ futures-util = { version = "0.3", features = ["channel", "io", "sink"] }
getrandom = { version = "0.2", default-features = false, features = ["std"] }
hashbrown = { version = "0.14", features = ["raw"] }
heck = { version = "0.4", features = ["unicode"] }
-indexmap-dff4ba8e3ae991db = { package = "indexmap", version = "1", default-features = false, features = ["std"] }
+indexmap = { version = "2" }
itertools = { version = "0.10" }
libc = { version = "0.2", features = ["extra_traits"] }
lock_api = { version = "0.4", features = ["arc_lock"] }
@@ -144,67 +142,75 @@ regex = { version = "1" }
regex-automata = { version = "0.3", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] }
regex-syntax = { version = "0.7" }
ring = { version = "0.16", features = ["std"] }
+rustls = { version = "0.21", default-features = false, features = ["dangerous_configuration", "logging", "tls12"] }
serde = { version = "1", features = ["derive", "rc"] }
serde_json = { version = "1", features = ["raw_value"] }
sha2 = { version = "0.10" }
smallvec = { version = "1", default-features = false, features = ["union"] }
-sqlx-core = { version = "0.6", default-features = false, features = ["any", "migrate", "postgres", "runtime-tokio-rustls", "sqlite", "uuid"] }
-sqlx-macros = { version = "0.6", default-features = false, features = ["json", "migrate", "postgres", "runtime-tokio-rustls", "sqlite", "uuid"] }
+sqlx-core = { version = "0.7", features = ["_rt-tokio", "_tls-rustls", "any", "json", "migrate", "offline", "uuid"] }
+sqlx-macros = { version = "0.7", features = ["_rt-tokio", "_tls-rustls", "json", "migrate", "postgres", "sqlite", "uuid"] }
+sqlx-macros-core = { version = "0.7", features = ["_rt-tokio", "_tls-rustls", "json", "migrate", "postgres", "sqlite", "uuid"] }
+sqlx-postgres = { version = "0.7", default-features = false, features = ["any", "json", "migrate", "offline", "uuid"] }
+sqlx-sqlite = { version = "0.7", default-features = false, features = ["any", "json", "migrate", "offline", "uuid"] }
syn-dff4ba8e3ae991db = { package = "syn", version = "1", features = ["extra-traits", "full"] }
syn-f595c2ba2a3f28df = { package = "syn", version = "2", features = ["extra-traits", "full", "visit-mut"] }
tokio = { version = "1", features = ["full", "test-util", "tracing"] }
tokio-stream = { version = "0.1", features = ["fs", "net"] }
+tracing = { version = "0.1", features = ["log", "max_level_trace", "release_max_level_trace"] }
+tracing-core = { version = "0.1" }
unicode-bidi = { version = "0.3" }
unicode-normalization = { version = "0.1" }
url = { version = "2" }
uuid = { version = "1", features = ["v4"] }
[target.x86_64-unknown-linux-gnu.dependencies]
+bitflags = { version = "2", default-features = false, features = ["std"] }
nix = { version = "0.26" }
once_cell = { version = "1", default-features = false, features = ["unstable"] }
rustix = { version = "0.38", features = ["fs", "termios"] }
-rustls = { version = "0.21", features = ["dangerous_configuration"] }
-webpki = { version = "0.22", default-features = false, features = ["std"] }
+rustls = { version = "0.21" }
[target.x86_64-unknown-linux-gnu.build-dependencies]
+bitflags = { version = "2", default-features = false, features = ["std"] }
once_cell = { version = "1", default-features = false, features = ["unstable"] }
-webpki = { version = "0.22", default-features = false, features = ["std"] }
+rustls = { version = "0.21" }
[target.x86_64-apple-darwin.dependencies]
+bitflags = { version = "2", default-features = false, features = ["std"] }
nix = { version = "0.26" }
once_cell = { version = "1", default-features = false, features = ["unstable"] }
rustix = { version = "0.38", features = ["fs", "termios"] }
-rustls = { version = "0.21", features = ["dangerous_configuration"] }
-webpki = { version = "0.22", default-features = false, features = ["std"] }
+rustls = { version = "0.21" }
[target.x86_64-apple-darwin.build-dependencies]
+bitflags = { version = "2", default-features = false, features = ["std"] }
once_cell = { version = "1", default-features = false, features = ["unstable"] }
-webpki = { version = "0.22", default-features = false, features = ["std"] }
+rustls = { version = "0.21" }
[target.aarch64-apple-darwin.dependencies]
+bitflags = { version = "2", default-features = false, features = ["std"] }
nix = { version = "0.26" }
once_cell = { version = "1", default-features = false, features = ["unstable"] }
rustix = { version = "0.38", features = ["fs", "termios"] }
-rustls = { version = "0.21", features = ["dangerous_configuration"] }
-webpki = { version = "0.22", default-features = false, features = ["std"] }
+rustls = { version = "0.21" }
[target.aarch64-apple-darwin.build-dependencies]
+bitflags = { version = "2", default-features = false, features = ["std"] }
once_cell = { version = "1", default-features = false, features = ["unstable"] }
-webpki = { version = "0.22", default-features = false, features = ["std"] }
+rustls = { version = "0.21" }
[target.x86_64-pc-windows-msvc.dependencies]
once_cell = { version = "1", default-features = false, features = ["unstable"] }
-rustls = { version = "0.21", features = ["dangerous_configuration"] }
+rustls = { version = "0.21" }
scopeguard = { version = "1" }
-webpki = { version = "0.22", default-features = false, features = ["std"] }
-winapi = { version = "0.3", default-features = false, features = ["basetsd", "cfg", "combaseapi", "consoleapi", "errhandlingapi", "evntrace", "fileapi", "handleapi", "heapapi", "ifdef", "impl-debug", "impl-default", "in6addr", "inaddr", "ioapiset", "iphlpapi", "knownfolders", "lmaccess", "lmapibuf", "lmcons", "memoryapi", "minwinbase", "minwindef", "netioapi", "ntlsa", "ntsecapi", "ntstatus", "objbase", "objidl", "oleauto", "pdh", "powerbase", "processenv", "psapi", "rpcdce", "sddl", "securitybaseapi", "shellapi", "shlobj", "std", "stringapiset", "synchapi", "sysinfoapi", "timezoneapi", "wbemcli", "winbase", "wincon", "windef", "winerror", "winioctl", "winnt", "winreg", "winsock2", "winuser", "ws2ipdef", "ws2tcpip", "wtypesbase"] }
+winapi = { version = "0.3", default-features = false, features = ["basetsd", "cfg", "combaseapi", "consoleapi", "errhandlingapi", "evntrace", "fileapi", "handleapi", "heapapi", "ifdef", "impl-debug", "impl-default", "in6addr", "inaddr", "ioapiset", "iphlpapi", "lmaccess", "lmapibuf", "lmcons", "memoryapi", "minwinbase", "minwindef", "netioapi", "ntlsa", "ntsecapi", "objidl", "oleauto", "pdh", "powerbase", "processenv", "psapi", "rpcdce", "sddl", "securitybaseapi", "shellapi", "std", "stringapiset", "synchapi", "sysinfoapi", "timezoneapi", "wbemcli", "winbase", "wincon", "windef", "winerror", "winioctl", "winnt", "winreg", "winsock2", "winuser", "ws2ipdef", "ws2tcpip", "wtypesbase"] }
windows-sys = { version = "0.48", features = ["Win32_Foundation", "Win32_Networking_WinSock", "Win32_Security", "Win32_Storage_FileSystem", "Win32_System_Console", "Win32_System_IO", "Win32_System_Pipes", "Win32_System_SystemServices", "Win32_System_Threading", "Win32_System_WindowsProgramming", "Win32_UI_Shell"] }
[target.x86_64-pc-windows-msvc.build-dependencies]
once_cell = { version = "1", default-features = false, features = ["unstable"] }
+rustls = { version = "0.21" }
scopeguard = { version = "1" }
-webpki = { version = "0.22", default-features = false, features = ["std"] }
-winapi = { version = "0.3", default-features = false, features = ["basetsd", "cfg", "combaseapi", "consoleapi", "errhandlingapi", "evntrace", "fileapi", "handleapi", "heapapi", "ifdef", "impl-debug", "impl-default", "in6addr", "inaddr", "ioapiset", "iphlpapi", "knownfolders", "lmaccess", "lmapibuf", "lmcons", "memoryapi", "minwinbase", "minwindef", "netioapi", "ntlsa", "ntsecapi", "ntstatus", "objbase", "objidl", "oleauto", "pdh", "powerbase", "processenv", "psapi", "rpcdce", "sddl", "securitybaseapi", "shellapi", "shlobj", "std", "stringapiset", "synchapi", "sysinfoapi", "timezoneapi", "wbemcli", "winbase", "wincon", "windef", "winerror", "winioctl", "winnt", "winreg", "winsock2", "winuser", "ws2ipdef", "ws2tcpip", "wtypesbase"] }
+winapi = { version = "0.3", default-features = false, features = ["basetsd", "cfg", "combaseapi", "consoleapi", "errhandlingapi", "evntrace", "fileapi", "handleapi", "heapapi", "ifdef", "impl-debug", "impl-default", "in6addr", "inaddr", "ioapiset", "iphlpapi", "lmaccess", "lmapibuf", "lmcons", "memoryapi", "minwinbase", "minwindef", "netioapi", "ntlsa", "ntsecapi", "objidl", "oleauto", "pdh", "powerbase", "processenv", "psapi", "rpcdce", "sddl", "securitybaseapi", "shellapi", "std", "stringapiset", "synchapi", "sysinfoapi", "timezoneapi", "wbemcli", "winbase", "wincon", "windef", "winerror", "winioctl", "winnt", "winreg", "winsock2", "winuser", "ws2ipdef", "ws2tcpip", "wtypesbase"] }
windows-sys = { version = "0.48", features = ["Win32_Foundation", "Win32_Networking_WinSock", "Win32_Security", "Win32_Storage_FileSystem", "Win32_System_Console", "Win32_System_IO", "Win32_System_Pipes", "Win32_System_SystemServices", "Win32_System_Threading", "Win32_System_WindowsProgramming", "Win32_UI_Shell"] }
### END HAKARI SECTION
|
cb02262b9dd1658ebf21e2a7f6d30f86c4cd8160
|
Marco Neumann
|
2023-01-23 15:40:35
|
extract "exec DF plan" and "store stream to file" components (#6663)
|
* refactor: extract `PartitionInfo`
* refactor: extract DF exec component
* feat: add some error conversions
* refactor: make fn public
* refactor: extract file sink component
* fix: clippy
| null |
refactor: extract "exec DF plan" and "store stream to file" components (#6663)
* refactor: extract `PartitionInfo`
* refactor: extract DF exec component
* feat: add some error conversions
* refactor: make fn public
* refactor: extract file sink component
* fix: clippy
|
diff --git a/compactor2/src/components/compact/compact_builder.rs b/compactor2/src/components/compact/compact_builder.rs
index b345383abc..76a07cdae0 100644
--- a/compactor2/src/components/compact/compact_builder.rs
+++ b/compactor2/src/components/compact/compact_builder.rs
@@ -18,10 +18,9 @@ use snafu::{ResultExt, Snafu};
use crate::{
components::compact::query_chunk::{to_queryable_parquet_chunk, QueryableParquetChunk},
config::Config,
+ partition_info::PartitionInfo,
};
-use super::partition::PartitionInfo;
-
#[derive(Debug, Snafu)]
#[allow(missing_copy_implementations, missing_docs)]
pub enum Error {
diff --git a/compactor2/src/components/compact/compact_executor.rs b/compactor2/src/components/compact/compact_executor.rs
index 0a6e599417..0a771e342a 100644
--- a/compactor2/src/components/compact/compact_executor.rs
+++ b/compactor2/src/components/compact/compact_executor.rs
@@ -1,25 +1,12 @@
use std::{future, sync::Arc};
-use data_types::{CompactionLevel, ParquetFileParams, SequenceNumber, ShardId};
-use datafusion::{error::DataFusionError, physical_plan::ExecutionPlan};
+use data_types::{CompactionLevel, ParquetFileParams};
+use datafusion::{error::DataFusionError, physical_plan::SendableRecordBatchStream};
use futures::{stream::FuturesOrdered, StreamExt, TryStreamExt};
-use iox_query::exec::{Executor, ExecutorType};
-use iox_time::TimeProvider;
-use observability_deps::tracing::{debug, info, trace, warn};
-use parquet_file::{
- metadata::IoxMetadata,
- serialize::CodecError,
- storage::{ParquetStorage, UploadError},
-};
+use observability_deps::tracing::debug;
use snafu::{ResultExt, Snafu};
-use uuid::Uuid;
-use crate::config::Config;
-
-use super::partition::PartitionInfo;
-
-// fields no longer used but still exists in the catalog
-const MAX_SEQUENCE_NUMBER: i64 = 0;
+use crate::{components::parquet_file_sink::ParquetFileSink, partition_info::PartitionInfo};
/// Compaction errors.
#[derive(Debug, Snafu)]
@@ -32,59 +19,42 @@ pub enum Error {
Persist {
source: parquet_file::storage::UploadError,
},
-
- #[snafu(display("Error executing parquet write task {}", source))]
- ExecuteParquetTask { source: tokio::task::JoinError },
}
/// Executor of a plan
pub(crate) struct CompactExecutor {
- // Partition of the plan to compact
- shard_id: ShardId,
partition: Arc<PartitionInfo>,
- plan: Arc<dyn ExecutionPlan>,
- store: ParquetStorage,
- exec: Arc<Executor>,
- time_provider: Arc<dyn TimeProvider>,
+ streams: Vec<SendableRecordBatchStream>,
+ sink: Arc<dyn ParquetFileSink>,
target_level: CompactionLevel,
}
impl CompactExecutor {
/// Create a new executor
pub fn new(
- plan: Arc<dyn ExecutionPlan>,
- shard_id: ShardId,
+ streams: Vec<SendableRecordBatchStream>,
partition: Arc<PartitionInfo>,
- config: Arc<Config>,
+ sink: Arc<dyn ParquetFileSink>,
target_level: CompactionLevel,
) -> Self {
Self {
- shard_id,
partition,
- plan,
- store: config.parquet_store.clone(),
- exec: Arc::clone(&config.exec),
- time_provider: Arc::clone(&config.time_provider),
+ streams,
+ sink,
target_level,
}
}
pub async fn execute(self) -> Result<Vec<ParquetFileParams>, Error> {
let Self {
- shard_id,
partition,
- plan,
- store,
- exec,
- time_provider,
+ streams,
+ sink,
target_level,
} = self;
- let partition_id = partition.partition_id;
-
// Run to collect each stream of the plan
- let stream_count = plan.output_partitioning().partition_count();
- debug!(stream_count, "running plan with streams");
+ debug!(stream_count = streams.len(), "running plan with streams");
// These streams *must* run in parallel otherwise a deadlock
// can occur. Since there is a merge in the plan, in order to make
@@ -93,96 +63,13 @@ impl CompactExecutor {
//
// https://github.com/influxdata/influxdb_iox/issues/4306
// https://github.com/influxdata/influxdb_iox/issues/4324
- let compacted_parquet_files: Vec<ParquetFileParams> = (0..stream_count)
- .map(|i| {
- // Prepare variables to pass to the closure
- let ctx = exec.new_context(ExecutorType::Reorg);
- let physical_plan = Arc::clone(&plan);
- let store = store.clone();
- let time_provider = Arc::clone(&time_provider);
- let partition = Arc::clone(&partition);
- let sort_key = partition.sort_key.clone();
- // run as a separate tokio task so files can be written
- // concurrently.
- tokio::task::spawn(async move {
- trace!(partition = i, "executing datafusion partition");
- let data = ctx
- .execute_stream_partitioned(physical_plan, i)
- .await
- .context(ExecuteCompactPlanSnafu)?;
- trace!(partition = i, "built result stream for partition");
-
- let meta = IoxMetadata {
- object_store_id: Uuid::new_v4(),
- creation_timestamp: time_provider.now(),
- shard_id,
- namespace_id: partition.namespace_id,
- namespace_name: partition.namespace_name.clone().into(),
- table_id: partition.table.id,
- table_name: partition.table.name.clone().into(),
- partition_id,
- partition_key: partition.partition_key.clone(),
- max_sequence_number: SequenceNumber::new(MAX_SEQUENCE_NUMBER),
- compaction_level: target_level,
- sort_key: sort_key.clone(),
- };
-
- debug!(
- partition_id = partition_id.get(),
- "executing and uploading compaction StreamSplitExec"
- );
-
- let object_store_id = meta.object_store_id;
- info!(
- partition_id = partition_id.get(),
- object_store_id = object_store_id.to_string(),
- "streaming exec to object store"
- );
-
- // Stream the record batches from the compaction exec, serialize
- // them, and directly upload the resulting Parquet files to
- // object storage.
- let (parquet_meta, file_size) = match store.upload(data, &meta).await {
- Ok(v) => v,
- Err(UploadError::Serialise(CodecError::NoRows)) => {
- // This MAY be a bug.
- //
- // This also may happen legitimately, though very, very
- // rarely. See test_empty_parquet_file_panic for an
- // explanation.
- warn!(
- partition_id = partition_id.get(),
- object_store_id = object_store_id.to_string(),
- "SplitExec produced an empty result stream"
- );
- return Ok(None);
- }
- Err(e) => return Err(Error::Persist { source: e }),
- };
-
- debug!(
- partition_id = partition_id.get(),
- object_store_id = object_store_id.to_string(),
- "file uploaded to object store"
- );
-
- let parquet_file =
- meta.to_parquet_file(partition_id, file_size, &parquet_meta, |name| {
- partition
- .table_schema
- .columns
- .get(name)
- .expect("unknown column")
- .id
- });
-
- Ok(Some(parquet_file))
- })
- })
+ let compacted_parquet_files: Vec<ParquetFileParams> = streams
+ .into_iter()
+ .map(|stream| sink.store(stream, Arc::clone(&partition), target_level))
// NB: FuturesOrdered allows the futures to run in parallel
.collect::<FuturesOrdered<_>>()
// Check for errors in the task
- .map(|t| t.context(ExecuteParquetTaskSnafu)?)
+ .map(|t| t.context(ExecuteCompactPlanSnafu))
// Discard the streams that resulted in empty output / no file uploaded
// to the object store.
.try_filter_map(|v| future::ready(Ok(v)))
diff --git a/compactor2/src/components/compact/compact_files.rs b/compactor2/src/components/compact/compact_files.rs
index e56381a148..4f14af1c47 100644
--- a/compactor2/src/components/compact/compact_files.rs
+++ b/compactor2/src/components/compact/compact_files.rs
@@ -4,13 +4,14 @@ use std::sync::Arc;
use data_types::{CompactionLevel, ParquetFile, ParquetFileParams};
use snafu::{ResultExt, Snafu};
-use crate::config::Config;
-
-use super::{
- compact_builder::CompactPlanBuilder, compact_executor::CompactExecutor,
- partition::PartitionInfo,
+use crate::{
+ components::{df_plan_exec::DataFusionPlanExec, parquet_file_sink::ParquetFileSink},
+ config::Config,
+ partition_info::PartitionInfo,
};
+use super::{compact_builder::CompactPlanBuilder, compact_executor::CompactExecutor};
+
/// Compaction errors.
#[derive(Debug, Snafu)]
#[allow(missing_copy_implementations, missing_docs)]
@@ -37,6 +38,8 @@ pub async fn compact_files(
files: Arc<Vec<ParquetFile>>,
partition_info: Arc<PartitionInfo>,
config: Arc<Config>,
+ plan_exec: Arc<dyn DataFusionPlanExec>,
+ parquet_file_sink: Arc<dyn ParquetFileSink>,
compaction_level: CompactionLevel,
) -> Result<Vec<ParquetFileParams>, Error> {
if files.is_empty() {
@@ -55,14 +58,11 @@ pub async fn compact_files(
.await
.context(BuildCompactPlanSnafu)?;
+ let streams = plan_exec.exec(plan);
+
// execute the plan
- let executor = CompactExecutor::new(
- plan,
- config.shard_id,
- partition_info,
- config,
- compaction_level,
- );
+ let executor =
+ CompactExecutor::new(streams, partition_info, parquet_file_sink, compaction_level);
let compacted_files = executor.execute().await.context(ExecuteCompactPlanSnafu)?;
Ok(compacted_files)
@@ -73,7 +73,14 @@ mod tests {
use data_types::CompactionLevel;
use std::sync::Arc;
- use crate::{components::compact::compact_files::compact_files, test_util::TestSetup};
+ use crate::{
+ components::{
+ compact::compact_files::compact_files,
+ df_plan_exec::{dedicated::DedicatedDataFusionPlanExec, DataFusionPlanExec},
+ parquet_file_sink::{object_store::ObjectStoreParquetFileSink, ParquetFileSink},
+ },
+ test_util::TestSetup,
+ };
#[tokio::test]
async fn test_compact_no_file() {
@@ -81,6 +88,8 @@ mod tests {
// no files
let setup = TestSetup::new(false).await;
+ let exec = exec(&setup);
+ let parquet_file_sink = parquet_file_sink(&setup);
let TestSetup {
files,
partition_info,
@@ -92,6 +101,8 @@ mod tests {
Arc::clone(&files),
Arc::clone(&partition_info),
Arc::clone(&config),
+ exec,
+ parquet_file_sink,
CompactionLevel::FileNonOverlapped,
)
.await
@@ -106,6 +117,8 @@ mod tests {
// Create a test setup with 6 files
let setup = TestSetup::new(true).await;
+ let exec = exec(&setup);
+ let parquet_file_sink = parquet_file_sink(&setup);
let TestSetup {
files,
partition_info,
@@ -120,6 +133,8 @@ mod tests {
Arc::clone(&files),
Arc::clone(&partition_info),
Arc::clone(&config),
+ Arc::clone(&exec),
+ Arc::clone(&parquet_file_sink),
CompactionLevel::FileNonOverlapped,
)
.await
@@ -142,6 +157,8 @@ mod tests {
Arc::clone(&files),
Arc::clone(&partition_info),
Arc::new(config),
+ exec,
+ parquet_file_sink,
CompactionLevel::FileNonOverlapped,
)
.await
@@ -153,4 +170,18 @@ mod tests {
assert_eq!(file.shard_id, shard_id);
}
}
+
+ fn exec(setup: &TestSetup) -> Arc<dyn DataFusionPlanExec> {
+ Arc::new(DedicatedDataFusionPlanExec::new(Arc::clone(
+ &setup.config.exec,
+ )))
+ }
+
+ fn parquet_file_sink(setup: &TestSetup) -> Arc<dyn ParquetFileSink> {
+ Arc::new(ObjectStoreParquetFileSink::new(
+ setup.config.shard_id,
+ setup.config.parquet_store.clone(),
+ Arc::clone(&setup.config.time_provider),
+ ))
+ }
}
diff --git a/compactor2/src/components/compact/mod.rs b/compactor2/src/components/compact/mod.rs
index 0fe229e10a..dd28ee04f1 100644
--- a/compactor2/src/components/compact/mod.rs
+++ b/compactor2/src/components/compact/mod.rs
@@ -1,5 +1,4 @@
pub mod compact_builder;
pub mod compact_executor;
pub mod compact_files;
-pub mod partition;
pub mod query_chunk;
diff --git a/compactor2/src/components/compact/partition.rs b/compactor2/src/components/compact/partition.rs
deleted file mode 100644
index f623235dfb..0000000000
--- a/compactor2/src/components/compact/partition.rs
+++ /dev/null
@@ -1,62 +0,0 @@
-//! Information of a partition for compaction
-
-use std::sync::Arc;
-
-use data_types::{NamespaceId, PartitionId, PartitionKey, Table, TableSchema};
-use schema::sort::SortKey;
-use snafu::Snafu;
-
-/// Compaction errors.
-#[derive(Debug, Snafu)]
-#[allow(missing_copy_implementations, missing_docs)]
-pub enum Error {
- #[snafu(display("Not implemented"))]
- NotImplemented,
-}
-
-/// Information of a partition for compaction
-pub struct PartitionInfo {
- /// the partition
- pub partition_id: PartitionId,
-
- /// Namespace ID
- pub namespace_id: NamespaceId,
-
- /// Namespace name
- pub namespace_name: String,
-
- /// Table.
- pub table: Arc<Table>,
-
- // Table schema
- pub table_schema: Arc<TableSchema>,
-
- /// Sort key of the partition
- pub sort_key: Option<SortKey>,
-
- /// partition_key
- pub partition_key: PartitionKey,
-}
-
-impl PartitionInfo {
- /// Create PartitionInfo for a paquert file
- pub fn new(
- partition_id: PartitionId,
- namespace_id: NamespaceId,
- namespace_name: String,
- table: Arc<Table>,
- table_schema: Arc<TableSchema>,
- sort_key: Option<SortKey>,
- partition_key: PartitionKey,
- ) -> Self {
- Self {
- partition_id,
- namespace_id,
- namespace_name,
- table,
- table_schema,
- sort_key,
- partition_key,
- }
- }
-}
diff --git a/compactor2/src/components/df_plan_exec/dedicated.rs b/compactor2/src/components/df_plan_exec/dedicated.rs
new file mode 100644
index 0000000000..2cf0fd60fc
--- /dev/null
+++ b/compactor2/src/components/df_plan_exec/dedicated.rs
@@ -0,0 +1,49 @@
+use std::{fmt::Display, sync::Arc};
+
+use datafusion::physical_plan::{
+ stream::RecordBatchStreamAdapter, ExecutionPlan, SendableRecordBatchStream,
+};
+use futures::TryStreamExt;
+use iox_query::exec::{Executor, ExecutorType};
+
+use super::DataFusionPlanExec;
+
+#[derive(Debug)]
+pub struct DedicatedDataFusionPlanExec {
+ exec: Arc<Executor>,
+}
+
+impl DedicatedDataFusionPlanExec {
+ pub fn new(exec: Arc<Executor>) -> Self {
+ Self { exec }
+ }
+}
+
+impl Display for DedicatedDataFusionPlanExec {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "dedicated")
+ }
+}
+
+impl DataFusionPlanExec for DedicatedDataFusionPlanExec {
+ fn exec(&self, plan: Arc<dyn ExecutionPlan>) -> Vec<SendableRecordBatchStream> {
+ let stream_count = plan.output_partitioning().partition_count();
+ let schema = plan.schema();
+ let ctx = self.exec.new_context(ExecutorType::Reorg);
+
+ (0..stream_count)
+ .map(|i| {
+ let plan = Arc::clone(&plan);
+ let ctx = ctx.child_ctx("partition");
+
+ let stream =
+ futures::stream::once(
+ async move { ctx.execute_stream_partitioned(plan, i).await },
+ )
+ .try_flatten();
+ let stream = RecordBatchStreamAdapter::new(Arc::clone(&schema), stream);
+ Box::pin(stream) as SendableRecordBatchStream
+ })
+ .collect()
+ }
+}
diff --git a/compactor2/src/components/df_plan_exec/mod.rs b/compactor2/src/components/df_plan_exec/mod.rs
new file mode 100644
index 0000000000..bd62bd038e
--- /dev/null
+++ b/compactor2/src/components/df_plan_exec/mod.rs
@@ -0,0 +1,23 @@
+use std::{
+ fmt::{Debug, Display},
+ sync::Arc,
+};
+
+use datafusion::physical_plan::{ExecutionPlan, SendableRecordBatchStream};
+
+pub mod dedicated;
+
+pub trait DataFusionPlanExec: Debug + Display + Send + Sync {
+ /// Convert DataFusion [`ExecutionPlan`] to multiple output streams.
+ ///
+ /// # Stream Polling
+    /// These streams *must* run in parallel otherwise a deadlock
+    /// can occur. Since there is a merge in the plan, in order to make
+ /// progress on one stream there must be (potential space) on the
+ /// other streams.
+ ///
+ /// See:
+ /// - <https://github.com/influxdata/influxdb_iox/issues/4306>
+ /// - <https://github.com/influxdata/influxdb_iox/issues/4324>
+ fn exec(&self, plan: Arc<dyn ExecutionPlan>) -> Vec<SendableRecordBatchStream>;
+}
diff --git a/compactor2/src/components/hardcoded.rs b/compactor2/src/components/hardcoded.rs
index 22614ee59b..3455b1b6bc 100644
--- a/compactor2/src/components/hardcoded.rs
+++ b/compactor2/src/components/hardcoded.rs
@@ -6,14 +6,25 @@ use std::sync::Arc;
use data_types::CompactionLevel;
-use crate::config::Config;
+use crate::{
+ components::{
+ namespaces_source::catalog::CatalogNamespacesSource,
+ tables_source::catalog::CatalogTablesSource,
+ },
+ config::Config,
+};
use super::{
commit::{
catalog::CatalogCommit, logging::LoggingCommitWrapper, metrics::MetricsCommitWrapper,
},
+ df_plan_exec::dedicated::DedicatedDataFusionPlanExec,
file_filter::{and::AndFileFilter, level_range::LevelRangeFileFilter},
files_filter::{chain::FilesFilterChain, per_file::PerFileFilesFilter},
+ parquet_file_sink::{
+ dedicated::DedicatedExecParquetFileSinkWrapper, logging::LoggingParquetFileSinkWrapper,
+ object_store::ObjectStoreParquetFileSink,
+ },
partition_error_sink::{
catalog::CatalogPartitionErrorSink, logging::LoggingPartitionErrorSinkWrapper,
metrics::MetricsPartitionErrorSinkWrapper,
@@ -79,17 +90,24 @@ pub fn hardcoded_components(config: &Config) -> Arc<Components> {
CatalogCommit::new(config.backoff_config.clone(), Arc::clone(&config.catalog)),
&config.metric_registry,
))),
- namespaces_source: Arc::new(
- crate::components::namespaces_source::catalog::CatalogNamespacesSource::new(
- config.backoff_config.clone(),
- Arc::clone(&config.catalog),
- ),
- ),
- tables_source: Arc::new(
- crate::components::tables_source::catalog::CatalogTablesSource::new(
- config.backoff_config.clone(),
- Arc::clone(&config.catalog),
+ namespaces_source: Arc::new(CatalogNamespacesSource::new(
+ config.backoff_config.clone(),
+ Arc::clone(&config.catalog),
+ )),
+ tables_source: Arc::new(CatalogTablesSource::new(
+ config.backoff_config.clone(),
+ Arc::clone(&config.catalog),
+ )),
+ df_plan_exec: Arc::new(DedicatedDataFusionPlanExec::new(Arc::clone(&config.exec))),
+ parquet_file_sink: Arc::new(LoggingParquetFileSinkWrapper::new(
+ DedicatedExecParquetFileSinkWrapper::new(
+ ObjectStoreParquetFileSink::new(
+ config.shard_id,
+ config.parquet_store.clone(),
+ Arc::clone(&config.time_provider),
+ ),
+ Arc::clone(&config.exec),
),
- ),
+ )),
})
}
diff --git a/compactor2/src/components/mod.rs b/compactor2/src/components/mod.rs
index 6916e27464..8395873180 100644
--- a/compactor2/src/components/mod.rs
+++ b/compactor2/src/components/mod.rs
@@ -1,7 +1,8 @@
use std::sync::Arc;
use self::{
- commit::Commit, files_filter::FilesFilter, namespaces_source::NamespacesSource,
+ commit::Commit, df_plan_exec::DataFusionPlanExec, files_filter::FilesFilter,
+ namespaces_source::NamespacesSource, parquet_file_sink::ParquetFileSink,
partition_error_sink::PartitionErrorSink, partition_files_source::PartitionFilesSource,
partition_filter::PartitionFilter, partitions_source::PartitionsSource,
tables_source::TablesSource,
@@ -9,10 +10,12 @@ use self::{
pub mod commit;
pub mod compact;
+pub mod df_plan_exec;
pub mod file_filter;
pub mod files_filter;
pub mod hardcoded;
pub mod namespaces_source;
+pub mod parquet_file_sink;
pub mod partition_error_sink;
pub mod partition_files_source;
pub mod partition_filter;
@@ -30,4 +33,6 @@ pub struct Components {
pub commit: Arc<dyn Commit>,
pub namespaces_source: Arc<dyn NamespacesSource>,
pub tables_source: Arc<dyn TablesSource>,
+ pub df_plan_exec: Arc<dyn DataFusionPlanExec>,
+ pub parquet_file_sink: Arc<dyn ParquetFileSink>,
}
diff --git a/compactor2/src/components/parquet_file_sink/dedicated.rs b/compactor2/src/components/parquet_file_sink/dedicated.rs
new file mode 100644
index 0000000000..776b611062
--- /dev/null
+++ b/compactor2/src/components/parquet_file_sink/dedicated.rs
@@ -0,0 +1,60 @@
+use std::{fmt::Display, sync::Arc};
+
+use async_trait::async_trait;
+use data_types::{CompactionLevel, ParquetFileParams};
+use datafusion::{error::DataFusionError, physical_plan::SendableRecordBatchStream};
+use iox_query::exec::{Executor, ExecutorType};
+
+use crate::partition_info::PartitionInfo;
+
+use super::ParquetFileSink;
+
+#[derive(Debug)]
+pub struct DedicatedExecParquetFileSinkWrapper<T>
+where
+ T: ParquetFileSink + 'static,
+{
+ exec: Arc<Executor>,
+ inner: Arc<T>,
+}
+
+impl<T> DedicatedExecParquetFileSinkWrapper<T>
+where
+ T: ParquetFileSink + 'static,
+{
+ pub fn new(inner: T, exec: Arc<Executor>) -> Self {
+ Self {
+ inner: Arc::new(inner),
+ exec,
+ }
+ }
+}
+
+impl<T> Display for DedicatedExecParquetFileSinkWrapper<T>
+where
+ T: ParquetFileSink + 'static,
+{
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "dedicated_exec({})", self.inner)
+ }
+}
+
+#[async_trait]
+impl<T> ParquetFileSink for DedicatedExecParquetFileSinkWrapper<T>
+where
+ T: ParquetFileSink + 'static,
+{
+ async fn store(
+ &self,
+ stream: SendableRecordBatchStream,
+ partition: Arc<PartitionInfo>,
+ level: CompactionLevel,
+ ) -> Result<Option<ParquetFileParams>, DataFusionError> {
+ let inner = Arc::clone(&self.inner);
+ self.exec
+ .executor(ExecutorType::Reorg)
+ .spawn(async move { inner.store(stream, partition, level).await })
+ .await
+ .map_err(|e| DataFusionError::External(e.into()))?
+ }
+}
diff --git a/compactor2/src/components/parquet_file_sink/logging.rs b/compactor2/src/components/parquet_file_sink/logging.rs
new file mode 100644
index 0000000000..3590569caf
--- /dev/null
+++ b/compactor2/src/components/parquet_file_sink/logging.rs
@@ -0,0 +1,78 @@
+use std::{fmt::Display, sync::Arc};
+
+use async_trait::async_trait;
+use data_types::{CompactionLevel, ParquetFileParams};
+use datafusion::{error::DataFusionError, physical_plan::SendableRecordBatchStream};
+use observability_deps::tracing::{info, warn};
+
+use crate::partition_info::PartitionInfo;
+
+use super::ParquetFileSink;
+
+#[derive(Debug)]
+pub struct LoggingParquetFileSinkWrapper<T>
+where
+ T: ParquetFileSink,
+{
+ inner: T,
+}
+
+impl<T> LoggingParquetFileSinkWrapper<T>
+where
+ T: ParquetFileSink,
+{
+ pub fn new(inner: T) -> Self {
+ Self { inner }
+ }
+}
+
+impl<T> Display for LoggingParquetFileSinkWrapper<T>
+where
+ T: ParquetFileSink,
+{
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "logging({})", self.inner)
+ }
+}
+
+#[async_trait]
+impl<T> ParquetFileSink for LoggingParquetFileSinkWrapper<T>
+where
+ T: ParquetFileSink,
+{
+ async fn store(
+ &self,
+ stream: SendableRecordBatchStream,
+ partition: Arc<PartitionInfo>,
+ level: CompactionLevel,
+ ) -> Result<Option<ParquetFileParams>, DataFusionError> {
+ let res = self
+ .inner
+ .store(stream, Arc::clone(&partition), level)
+ .await;
+ match &res {
+ Ok(Some(f)) => {
+ info!(
+ partition_id = partition.partition_id.get(),
+ object_store_id=%f.object_store_id,
+ file_size_bytes=f.file_size_bytes,
+ "Stored file",
+ )
+ }
+ Ok(None) => {
+ warn!(
+ partition_id = partition.partition_id.get(),
+ "SplitExec produced an empty result stream"
+ );
+ }
+ Err(e) => {
+ warn!(
+ %e,
+ partition_id=partition.partition_id.get(),
+ "Error while uploading file",
+ );
+ }
+ }
+ res
+ }
+}
diff --git a/compactor2/src/components/parquet_file_sink/mod.rs b/compactor2/src/components/parquet_file_sink/mod.rs
new file mode 100644
index 0000000000..f3045da4c7
--- /dev/null
+++ b/compactor2/src/components/parquet_file_sink/mod.rs
@@ -0,0 +1,24 @@
+use std::{
+ fmt::{Debug, Display},
+ sync::Arc,
+};
+
+use async_trait::async_trait;
+use data_types::{CompactionLevel, ParquetFileParams};
+use datafusion::{error::DataFusionError, physical_plan::SendableRecordBatchStream};
+
+use crate::partition_info::PartitionInfo;
+
+pub mod dedicated;
+pub mod logging;
+pub mod object_store;
+
+#[async_trait]
+pub trait ParquetFileSink: Debug + Display + Send + Sync {
+ async fn store(
+ &self,
+ stream: SendableRecordBatchStream,
+ partition: Arc<PartitionInfo>,
+ level: CompactionLevel,
+ ) -> Result<Option<ParquetFileParams>, DataFusionError>;
+}
diff --git a/compactor2/src/components/parquet_file_sink/object_store.rs b/compactor2/src/components/parquet_file_sink/object_store.rs
new file mode 100644
index 0000000000..b20a9cf524
--- /dev/null
+++ b/compactor2/src/components/parquet_file_sink/object_store.rs
@@ -0,0 +1,101 @@
+use std::{fmt::Display, sync::Arc};
+
+use async_trait::async_trait;
+use data_types::{CompactionLevel, ParquetFileParams, SequenceNumber, ShardId};
+use datafusion::{error::DataFusionError, physical_plan::SendableRecordBatchStream};
+use iox_time::TimeProvider;
+use parquet_file::{
+ metadata::IoxMetadata,
+ serialize::CodecError,
+ storage::{ParquetStorage, UploadError},
+};
+use uuid::Uuid;
+
+use crate::partition_info::PartitionInfo;
+
+use super::ParquetFileSink;
+
+// fields no longer used but still exists in the catalog
+const MAX_SEQUENCE_NUMBER: i64 = 0;
+
+#[derive(Debug)]
+pub struct ObjectStoreParquetFileSink {
+ shared_id: ShardId,
+ store: ParquetStorage,
+ time_provider: Arc<dyn TimeProvider>,
+}
+
+impl ObjectStoreParquetFileSink {
+ pub fn new(
+ shared_id: ShardId,
+ store: ParquetStorage,
+ time_provider: Arc<dyn TimeProvider>,
+ ) -> Self {
+ Self {
+ shared_id,
+ store,
+ time_provider,
+ }
+ }
+}
+
+impl Display for ObjectStoreParquetFileSink {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "object_store")
+ }
+}
+
+#[async_trait]
+impl ParquetFileSink for ObjectStoreParquetFileSink {
+ async fn store(
+ &self,
+ stream: SendableRecordBatchStream,
+ partition: Arc<PartitionInfo>,
+ level: CompactionLevel,
+ ) -> Result<Option<ParquetFileParams>, DataFusionError> {
+ let meta = IoxMetadata {
+ object_store_id: Uuid::new_v4(),
+ creation_timestamp: self.time_provider.now(),
+ shard_id: self.shared_id,
+ namespace_id: partition.namespace_id,
+ namespace_name: partition.namespace_name.clone().into(),
+ table_id: partition.table.id,
+ table_name: partition.table.name.clone().into(),
+ partition_id: partition.partition_id,
+ partition_key: partition.partition_key.clone(),
+ max_sequence_number: SequenceNumber::new(MAX_SEQUENCE_NUMBER),
+ compaction_level: level,
+ sort_key: partition.sort_key.clone(),
+ };
+
+ // Stream the record batches from the compaction exec, serialize
+ // them, and directly upload the resulting Parquet files to
+ // object storage.
+ let (parquet_meta, file_size) = match self.store.upload(stream, &meta).await {
+ Ok(v) => v,
+ Err(UploadError::Serialise(CodecError::NoRows | CodecError::NoRecordBatches)) => {
+ // This MAY be a bug.
+ //
+ // This also may happen legitimately, though very, very
+ // rarely. See test_empty_parquet_file_panic for an
+ // explanation.
+ return Ok(None);
+ }
+ Err(e) => {
+ return Err(e.into());
+ }
+ };
+
+ let parquet_file =
+ meta.to_parquet_file(partition.partition_id, file_size, &parquet_meta, |name| {
+ partition
+ .table_schema
+ .columns
+ .get(name)
+ .expect("unknown column")
+ .id
+ });
+
+ Ok(Some(parquet_file))
+ }
+}
diff --git a/compactor2/src/components/report.rs b/compactor2/src/components/report.rs
index c55f39cdfa..82935d9824 100644
--- a/compactor2/src/components/report.rs
+++ b/compactor2/src/components/report.rs
@@ -16,6 +16,8 @@ pub fn log_components(components: &Components) {
commit,
tables_source,
namespaces_source,
+ df_plan_exec,
+ parquet_file_sink,
} = components;
info!(
@@ -27,6 +29,8 @@ pub fn log_components(components: &Components) {
%commit,
%tables_source,
%namespaces_source,
+ %df_plan_exec,
+ %parquet_file_sink,
"component setup",
);
}
diff --git a/compactor2/src/driver.rs b/compactor2/src/driver.rs
index b6d0ac4c56..7765af530d 100644
--- a/compactor2/src/driver.rs
+++ b/compactor2/src/driver.rs
@@ -5,11 +5,9 @@ use futures::StreamExt;
use observability_deps::tracing::info;
use crate::{
- components::{
- compact::{compact_files::compact_files, partition::PartitionInfo},
- Components,
- },
+ components::{compact::compact_files::compact_files, Components},
config::Config,
+ partition_info::PartitionInfo,
};
// TODO: modify this comments accordingly as we go
@@ -105,15 +103,15 @@ pub async fn compact(config: &Config, components: &Arc<Components>) {
}
let table_schema = table_schema.unwrap();
- let partition_info = PartitionInfo::new(
+ let partition_info = PartitionInfo {
partition_id,
- table.namespace_id,
- namespace.name,
- Arc::new(table),
- Arc::new(table_schema.clone()),
- partition.sort_key(),
- partition.partition_key,
- );
+ namespace_id: table.namespace_id,
+ namespace_name: namespace.name,
+ table: Arc::new(table),
+ table_schema: Arc::new(table_schema.clone()),
+ sort_key: partition.sort_key(),
+ partition_key: partition.partition_key,
+ };
let files = Arc::new(files);
@@ -126,6 +124,8 @@ pub async fn compact(config: &Config, components: &Arc<Components>) {
Arc::clone(&files),
Arc::new(partition_info),
Arc::new(config),
+ Arc::clone(&components.df_plan_exec),
+ Arc::clone(&components.parquet_file_sink),
CompactionLevel::FileNonOverlapped,
)
.await
diff --git a/compactor2/src/lib.rs b/compactor2/src/lib.rs
index 64f395bccd..5d2ba5b48f 100644
--- a/compactor2/src/lib.rs
+++ b/compactor2/src/lib.rs
@@ -15,6 +15,7 @@ pub mod compactor;
mod components;
pub mod config;
mod driver;
+mod partition_info;
#[cfg(test)]
mod test_util;
diff --git a/compactor2/src/partition_info.rs b/compactor2/src/partition_info.rs
new file mode 100644
index 0000000000..48141737b1
--- /dev/null
+++ b/compactor2/src/partition_info.rs
@@ -0,0 +1,31 @@
+//! Information of a partition for compaction
+
+use std::sync::Arc;
+
+use data_types::{NamespaceId, PartitionId, PartitionKey, Table, TableSchema};
+use schema::sort::SortKey;
+
+/// Information of a partition for compaction
+#[derive(Debug)]
+pub struct PartitionInfo {
+ /// the partition
+ pub partition_id: PartitionId,
+
+ /// Namespace ID
+ pub namespace_id: NamespaceId,
+
+ /// Namespace name
+ pub namespace_name: String,
+
+ /// Table.
+ pub table: Arc<Table>,
+
+ // Table schema
+ pub table_schema: Arc<TableSchema>,
+
+ /// Sort key of the partition
+ pub sort_key: Option<SortKey>,
+
+ /// partition_key
+ pub partition_key: PartitionKey,
+}
diff --git a/compactor2/src/test_util.rs b/compactor2/src/test_util.rs
index adf4da0405..27ec377e62 100644
--- a/compactor2/src/test_util.rs
+++ b/compactor2/src/test_util.rs
@@ -13,8 +13,8 @@ use schema::sort::SortKey;
use uuid::Uuid;
use crate::{
- components::{compact::partition::PartitionInfo, namespaces_source::mock::NamespaceWrapper},
- config::Config,
+ components::namespaces_source::mock::NamespaceWrapper, config::Config,
+ partition_info::PartitionInfo,
};
#[derive(Debug)]
@@ -223,7 +223,7 @@ const SPLIT_PERCENTAGE: u16 = 80;
pub struct TestSetup {
pub files: Arc<Vec<ParquetFile>>,
- pub partition_info: Arc<crate::components::compact::partition::PartitionInfo>,
+ pub partition_info: Arc<PartitionInfo>,
pub catalog: Arc<TestCatalog>,
pub table: Arc<TestTable>,
pub config: Arc<Config>,
@@ -252,15 +252,15 @@ impl TestSetup {
let sort_key = SortKey::from_columns(["tag1", "tag2", "tag3", "time"]);
let partition = partition.update_sort_key(sort_key.clone()).await;
- let candidate_partition = Arc::new(PartitionInfo::new(
- partition.partition.id,
- ns.namespace.id,
- ns.namespace.name.clone(),
- Arc::new(table.table.clone()),
- Arc::new(table_schema),
- partition.partition.sort_key(),
- partition.partition.partition_key.clone(),
- ));
+ let candidate_partition = Arc::new(PartitionInfo {
+ partition_id: partition.partition.id,
+ namespace_id: ns.namespace.id,
+ namespace_name: ns.namespace.name.clone(),
+ table: Arc::new(table.table.clone()),
+ table_schema: Arc::new(table_schema),
+ sort_key: partition.partition.sort_key(),
+ partition_key: partition.partition.partition_key.clone(),
+ });
let mut parquet_files = vec![];
if with_files {
diff --git a/iox_query/src/exec.rs b/iox_query/src/exec.rs
index 5a67510e77..02f6352c45 100644
--- a/iox_query/src/exec.rs
+++ b/iox_query/src/exec.rs
@@ -205,7 +205,7 @@ impl Executor {
}
/// Return the execution pool of the specified type
- fn executor(&self, executor_type: ExecutorType) -> &DedicatedExecutor {
+ pub fn executor(&self, executor_type: ExecutorType) -> &DedicatedExecutor {
match executor_type {
ExecutorType::Query => &self.executors.query_exec,
ExecutorType::Reorg => &self.executors.reorg_exec,
diff --git a/parquet_file/src/serialize.rs b/parquet_file/src/serialize.rs
index b393aff086..51831cfa9e 100644
--- a/parquet_file/src/serialize.rs
+++ b/parquet_file/src/serialize.rs
@@ -5,7 +5,7 @@
use std::{io::Write, sync::Arc};
use arrow::error::ArrowError;
-use datafusion::physical_plan::SendableRecordBatchStream;
+use datafusion::{error::DataFusionError, physical_plan::SendableRecordBatchStream};
use datafusion_util::config::BATCH_SIZE;
use futures::{pin_mut, TryStreamExt};
use observability_deps::tracing::{debug, trace, warn};
@@ -62,6 +62,19 @@ pub enum CodecError {
CloneSink(std::io::Error),
}
+impl From<CodecError> for DataFusionError {
+ fn from(value: CodecError) -> Self {
+ match value {
+ e @ (CodecError::NoRecordBatches
+ | CodecError::NoRows
+ | CodecError::MetadataSerialisation(_)
+ | CodecError::CloneSink(_)) => Self::External(Box::new(e)),
+ CodecError::Arrow(e) => Self::ArrowError(e),
+ CodecError::Writer(e) => Self::ParquetError(e),
+ }
+ }
+}
+
/// An IOx-specific, streaming [`RecordBatch`] to parquet file encoder.
///
/// This encoder discovers the schema from the first item in `batches`, and
diff --git a/parquet_file/src/storage.rs b/parquet_file/src/storage.rs
index ceb27b207f..51f15b821a 100644
--- a/parquet_file/src/storage.rs
+++ b/parquet_file/src/storage.rs
@@ -52,6 +52,18 @@ pub enum UploadError {
Upload(#[from] object_store::Error),
}
+impl From<UploadError> for DataFusionError {
+ fn from(value: UploadError) -> Self {
+ match value {
+ UploadError::Serialise(e) => {
+ Self::Context(String::from("serialize"), Box::new(e.into()))
+ }
+ UploadError::Metadata(e) => Self::External(Box::new(e)),
+ UploadError::Upload(e) => Self::ObjectStore(e),
+ }
+ }
+}
+
/// ID for an object store hooked up into DataFusion.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
pub struct StorageId(&'static str);
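A hypothetical wiring sketch (not part of the diff above): one plausible way the three sink types introduced in this commit could be composed, assuming `shard_id`, `store`, `time_provider` and `exec` are already available from compactor setup. The layering shown here, object-store sink innermost, dedicated-executor wrapper around it, logging outermost, is an assumption for illustration only.

use std::sync::Arc;

// Hypothetical helper; the real component wiring lives elsewhere in compactor2.
fn build_sink(
    shard_id: data_types::ShardId,
    store: parquet_file::storage::ParquetStorage,
    time_provider: Arc<dyn iox_time::TimeProvider>,
    exec: Arc<iox_query::exec::Executor>,
) -> Arc<dyn ParquetFileSink> {
    // Innermost sink serializes the record batches and uploads them to object storage.
    let sink = ObjectStoreParquetFileSink::new(shard_id, store, time_provider);
    // Run the store() future on the dedicated reorg executor instead of the IO runtime.
    let sink = DedicatedExecParquetFileSinkWrapper::new(sink, exec);
    // Log every stored (or failed) file.
    Arc::new(LoggingParquetFileSinkWrapper::new(sink))
}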
|
c0fcd5e32aedecaef45d9c94aca2a171bd44c19f
|
Jake Goulding
|
2022-11-09 04:37:55
|
Ensure router's HTTP error messages are stable (#6006)
|
* test: Ensure router's HTTP error messages are stable
If you change the text of an error, the tests will fail.
If you add a new error variant to the `Error` enum but don't add it to
the test, test compilation will fail with a "non-exhaustive patterns"
message.
If you remove an error variant, test compilation will fail with a "no
variant named `RemovedError`" message.
You can get the list of error variants and their current text via
`cargo test -p router -- print_out_error_text --nocapture`.
A step towards accomplishing #5863
Co-authored-by: Carol (Nichols || Goulding) <[email protected]>
Co-authored-by: Jake Goulding <[email protected]>
* fix: Remove optional commas and document macro arguments
* docs: Clarify the purpose of the tests the check_errors macro generates
* fix: Add tests for inner mutable batch LP error variants
|
Co-authored-by: Carol (Nichols || Goulding) <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
test: Ensure router's HTTP error messages are stable (#6006)
* test: Ensure router's HTTP error messages are stable
If you change the text of an error, the tests will fail.
If you add a new error variant to the `Error` enum but don't add it to
the test, test compilation will fail with a "non-exhaustive patterns"
message.
If you remove an error variant, test compilation will fail with a "no
variant named `RemovedError`" message.
You can get the list of error variants and their current text via
`cargo test -p router -- print_out_error_text --nocapture`.
A step towards accomplishing #5863
Co-authored-by: Carol (Nichols || Goulding) <[email protected]>
Co-authored-by: Jake Goulding <[email protected]>
* fix: Remove optional commas and document macro arguments
* docs: Clarify the purpose of the tests the check_errors macro generates
* fix: Add tests for inner mutable batch LP error variants
Co-authored-by: Carol (Nichols || Goulding) <[email protected]>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/Cargo.lock b/Cargo.lock
index 3537b07ce8..6012035630 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4295,6 +4295,7 @@ dependencies = [
"generated_types",
"hashbrown",
"hyper",
+ "influxdb_line_protocol",
"iox_catalog",
"iox_tests",
"iox_time",
diff --git a/router/Cargo.toml b/router/Cargo.toml
index 8bf6094358..b2c59455b8 100644
--- a/router/Cargo.toml
+++ b/router/Cargo.toml
@@ -45,6 +45,7 @@ write_summary = { path = "../write_summary" }
[dev-dependencies]
assert_matches = "1.5"
criterion = { version = "0.4", default-features = false, features = ["async_tokio", "rayon"]}
+influxdb_line_protocol = { path = "../influxdb_line_protocol" }
iox_tests = { path = "../iox_tests" }
once_cell = "1"
paste = "1.0.9"
diff --git a/router/src/server/http.rs b/router/src/server/http.rs
index 53c8659ed7..85c0db67e3 100644
--- a/router/src/server/http.rs
+++ b/router/src/server/http.rs
@@ -2,8 +2,6 @@
mod delete_predicate;
-use std::{str::Utf8Error, time::Instant};
-
use bytes::{Bytes, BytesMut};
use data_types::{org_and_bucket_to_database, OrgBucketMappingError};
use futures::StreamExt;
@@ -16,6 +14,7 @@ use mutable_batch_lp::LinesConverter;
use observability_deps::tracing::*;
use predicate::delete_predicate::parse_delete_predicate;
use serde::Deserialize;
+use std::{str::Utf8Error, time::Instant};
use thiserror::Error;
use tokio::sync::{Semaphore, TryAcquireError};
use trace::ctx::SpanContext;
@@ -370,7 +369,12 @@ where
let namespace = org_and_bucket_to_database(&write_info.org, &write_info.bucket)
.map_err(OrgBucketError::MappingFail)?;
- trace!(org=%write_info.org, bucket=%write_info.bucket, %namespace, "processing write request");
+ trace!(
+ org=%write_info.org,
+ bucket=%write_info.bucket,
+ %namespace,
+ "processing write request"
+ );
// Read the HTTP body and convert it to a str.
let body = self.read_body(req).await?;
@@ -538,24 +542,23 @@ where
#[cfg(test)]
mod tests {
- use std::{io::Write, iter, sync::Arc, time::Duration};
-
+ use super::*;
+ use crate::{
+ dml_handlers::mock::{MockDmlHandler, MockDmlHandlerCall},
+ namespace_resolver::mock::MockNamespaceResolver,
+ };
use assert_matches::assert_matches;
- use data_types::NamespaceId;
+ use data_types::{DatabaseNameError, NamespaceId};
use flate2::{write::GzEncoder, Compression};
use hyper::header::HeaderValue;
use metric::{Attributes, Metric};
use mutable_batch::column::ColumnData;
use mutable_batch_lp::LineWriteError;
+ use serde::de::Error as _;
+ use std::{io::Write, iter, sync::Arc, time::Duration};
use test_helpers::timeout::FutureTimeout;
use tokio_stream::wrappers::ReceiverStream;
- use super::*;
- use crate::{
- dml_handlers::mock::{MockDmlHandler, MockDmlHandlerCall},
- namespace_resolver::mock::MockNamespaceResolver,
- };
-
const MAX_BYTES: usize = 1024;
const NAMESPACE_ID: NamespaceId = NamespaceId::new(42);
@@ -1313,4 +1316,194 @@ mod tests {
// And the request rejected metric must remain unchanged
assert_metric_hit(&*metrics, "http_request_limit_rejected", Some(1));
}
+
+ // The display text of Error gets passed through `ioxd_router::IoxHttpErrorAdaptor` then
+ // `ioxd_common::http::error::HttpApiError` as the JSON "message" value in error response
+ // bodies. These are fixture tests to document error messages that users might see when
+ // making requests to `/api/v2/write`.
+ macro_rules! check_errors {
+ (
+ $(( // This macro expects a list of tuples, each specifying:
+ $variant:ident // - One of the error enum variants
+ $(($data:expr))?, // - If needed, an expression to construct the variant's data
+ $msg:expr $(,)? // - The string expected for `Display`ing this variant
+ )),*,
+ ) => {
+ // Generate code that contains all possible error variants, to ensure a compiler error
+ // if any errors are not explicitly covered in this test.
+ #[test]
+ fn all_error_variants_are_checked() {
+ #[allow(dead_code)]
+ fn all_documented(ensure_all_error_variants_are_checked: Error) {
+ #[allow(unreachable_patterns)]
+ match ensure_all_error_variants_are_checked {
+ $(Error::$variant { .. } => {},)*
+ // If this test doesn't compile because of a non-exhaustive pattern,
+ // a variant needs to be added to the `check_errors!` call with the
+ // expected `to_string()` text.
+ }
+ }
+ }
+
+ // A test that covers all errors given to this macro.
+ #[tokio::test]
+ async fn error_messages_match() {
+ // Generate an assert for each error given to this macro.
+ $(
+ let e = Error::$variant $(($data))?;
+ assert_eq!(e.to_string(), $msg);
+ )*
+ }
+
+ #[test]
+ fn print_out_error_text() {
+ println!("{}", concat!($(stringify!($variant), "\t", $msg, "\n",)*),)
+ }
+ };
+ }
+
+ check_errors! {
+ (
+ NoHandler,
+ "not found",
+ ),
+
+ (InvalidOrgBucket(OrgBucketError::NotSpecified), "no org/bucket destination provided"),
+
+ (
+ InvalidOrgBucket({
+ let e = serde::de::value::Error::custom("[deserialization error]");
+ OrgBucketError::DecodeFail(e)
+ }),
+ "failed to deserialize org/bucket/precision in request: [deserialization error]",
+ ),
+
+ (
+ InvalidOrgBucket(OrgBucketError::MappingFail(OrgBucketMappingError::NotSpecified)),
+ "missing org/bucket value",
+ ),
+
+ (
+ InvalidOrgBucket({
+ let e = DatabaseNameError::LengthConstraint { name: "[too long name]".into() };
+ let e = OrgBucketMappingError::InvalidDatabaseName { source: e };
+ OrgBucketError::MappingFail(e)
+ }),
+ "Invalid database name: \
+ Database name [too long name] length must be between 1 and 64 characters",
+ ),
+
+ (
+ NonUtf8Body(std::str::from_utf8(&[0, 159]).unwrap_err()),
+ "body content is not valid utf8: invalid utf-8 sequence of 1 bytes from index 1",
+ ),
+
+ (
+ NonUtf8ContentHeader({
+ hyper::header::HeaderValue::from_bytes(&[159]).unwrap().to_str().unwrap_err()
+ }),
+ "invalid content-encoding header: failed to convert header to a str",
+ ),
+
+ (
+ InvalidContentEncoding("[invalid content encoding value]".into()),
+ "unacceptable content-encoding: [invalid content encoding value]",
+ ),
+
+ (
+ ClientHangup({
+ let url = "wrong://999.999.999.999:999999".parse().unwrap();
+ hyper::Client::new().get(url).await.unwrap_err()
+ }),
+ "client disconnected",
+ ),
+
+ (
+ RequestSizeExceeded(1337),
+ "max request size (1337 bytes) exceeded",
+ ),
+
+ (
+ InvalidGzip(std::io::Error::new(std::io::ErrorKind::Other, "[io Error]")),
+ "error decoding gzip stream: [io Error]",
+ ),
+
+ (
+ ParseLineProtocol(mutable_batch_lp::Error::LineProtocol {
+ source: influxdb_line_protocol::Error::FieldSetMissing,
+ line: 42,
+ }),
+ "failed to parse line protocol: \
+ error parsing line 42 (1-based): No fields were provided",
+ ),
+
+ (
+ ParseLineProtocol(mutable_batch_lp::Error::Write {
+ source: mutable_batch_lp::LineWriteError::DuplicateTag {
+ name: "host".into(),
+ },
+ line: 42,
+ }),
+ "failed to parse line protocol: \
+ error writing line 42: \
+ the tag 'host' is specified more than once with conflicting values",
+ ),
+
+ (
+ ParseLineProtocol(mutable_batch_lp::Error::Write {
+ source: mutable_batch_lp::LineWriteError::ConflictedFieldTypes {
+ name: "bananas".into(),
+ },
+ line: 42,
+ }),
+ "failed to parse line protocol: \
+ error writing line 42: \
+ the field 'bananas' is specified more than once with conflicting types",
+ ),
+
+ (
+ ParseLineProtocol(mutable_batch_lp::Error::EmptyPayload),
+ "failed to parse line protocol: empty write payload",
+ ),
+
+ (
+ ParseLineProtocol(mutable_batch_lp::Error::TimestampOverflow),
+ "failed to parse line protocol: timestamp overflows i64",
+ ),
+
+ (
+ ParseDelete({
+ predicate::delete_predicate::Error::InvalidSyntax { value: "[syntax]".into() }
+ }),
+ "failed to parse delete predicate: Invalid predicate syntax: ([syntax])",
+ ),
+
+ (
+ ParseHttpDelete({
+ delete_predicate::Error::TableInvalid { value: "[table name]".into() }
+ }),
+ "failed to parse delete predicate from http request: \
+ Invalid table name in delete '[table name]'"
+ ),
+
+ (
+ DmlHandler(DmlError::DatabaseNotFound("[database name]".into())),
+ "dml handler error: database [database name] does not exist",
+ ),
+
+ (
+ NamespaceResolver({
+ let e = iox_catalog::interface::Error::NameExists { name: "[name]".into() };
+ crate::namespace_resolver::Error::Lookup(e)
+ }),
+ "failed to resolve namespace ID: \
+ failed to resolve namespace ID: \
+ name [name] already exists",
+ ),
+
+ (
+ RequestLimit,
+ "this service is overloaded, please try again later",
+ ),
+ }
}
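The fixture-test trick described in the commit body above boils down to an exhaustive `match` over the error enum. A minimal standalone sketch with a hypothetical `DemoError` (not the router's real `Error` type):

#[derive(Debug)]
enum DemoError {
    NotFound,
    Overloaded,
}

#[allow(dead_code)]
fn all_variants_are_documented(e: DemoError) {
    match e {
        // One arm per variant covered by the fixture tests: a variant added to
        // `DemoError` without a matching arm fails to compile with
        // "non-exhaustive patterns", and a removed variant fails with
        // "no variant named ...".
        DemoError::NotFound => {}
        DemoError::Overloaded => {}
    }
}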
|
3263934db1f16a0bd8f5fa3d3217590c00506890
|
Andrew Lamb
|
2023-04-26 11:22:13
|
Run end to end integration tests as part of a separate circle CI Job (#7658)
|
* chore: Run end to end integration tests as part of a separate circle CI Job
* fix: but
* feat: update job name
* chore: Update resources
---------
|
Co-authored-by: Dom <[email protected]>
|
chore: Run end to end integration tests as part of a separate circle CI Job (#7658)
* chore: Run end to end integration tests as part of a separate circle CI Job
* fix: but
* feat: update job name
* chore: Update resources
---------
Co-authored-by: Dom <[email protected]>
|
diff --git a/.circleci/config.yml b/.circleci/config.yml
index f548cf6601..cd50791d52 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -225,14 +225,16 @@ jobs:
echo "No changes to commit"
fi
- test:
+ # Run integration tests that need postgres and other external dependencies
+ # such as influxdb v1 and java
+ test_integration:
# setup multiple docker images (see https://circleci.com/docs/2.0/configuration-reference/#docker)
docker:
- image: quay.io/influxdb/rust:ci
- image: postgres
environment:
POSTGRES_HOST_AUTH_METHOD: trust
- resource_class: 2xlarge+ # use of a smaller executor tends crashes on link
+ resource_class: xlarge # use of a smaller executor tends crashes on link
environment:
# Disable incremental compilation to avoid overhead. We are not preserving these files anyway.
CARGO_INCREMENTAL: "0"
@@ -258,23 +260,32 @@ jobs:
# to ingester2, which does have a "persist-per-namespace" API that means tests can run on
# shared MiniClusters.
RUST_TEST_THREADS: 8
+ # Run the JDBC tests
+ TEST_INFLUXDB_JDBC: "true"
steps:
- checkout
+ - run:
+ name: Install javac
+ command: |
+ sudo apt-get update
+ sudo apt-get install openjdk-11-jdk -y
- rust_components
- cache_restore
- run:
- name: Cargo test
- command: cargo test --workspace
+ name: cargo test -p influxdb2_client
+ command: cargo test -p influxdb2_client
+ - run:
+ name: cargo test -p iox_catalog
+ command: cargo test -p iox_catalog
+ - run:
+ name: cargo test --test end_to_end
+ command: cargo test --test end_to_end
- cache_save
- # end to end test for JDBC
- test-flight-sql-jdbc:
- # setup multiple docker images (see https://circleci.com/docs/2.0/configuration-reference/#docker)
+ # Run all tests (without external dependencies, like a developer would)
+ test:
docker:
- image: quay.io/influxdb/rust:ci
- - image: postgres
- environment:
- POSTGRES_HOST_AUTH_METHOD: trust
resource_class: 2xlarge+ # use of a smaller executor tends crashes on link
environment:
# Disable incremental compilation to avoid overhead. We are not preserving these files anyway.
@@ -285,28 +296,14 @@ jobs:
# https://github.com/rust-lang/cargo/issues/10280
CARGO_NET_GIT_FETCH_WITH_CLI: "true"
RUST_BACKTRACE: "1"
- # Run integration tests
- TEST_INTEGRATION: 1
- INFLUXDB_IOX_INTEGRATION_LOCAL: 1
- POSTGRES_USER: postgres
- TEST_INFLUXDB_IOX_CATALOG_DSN: "postgres://postgres@localhost/iox_shared"
- # When removing this, also remove the ignore on the test in trogging/src/cli.rs
- RUST_LOG: debug,,hyper::proto::h1=info,h2=info
- LOG_FILTER: debug,,hyper::proto::h1=info,h2=info
- # Run the actual test
- TEST_INFLUXDB_JDBC: "true"
steps:
- checkout
- - run:
- name: Install javac
- command: |
- sudo apt-get update
- sudo apt-get install openjdk-11-jdk -y
- rust_components
- cache_restore
- run:
- name: Cargo test
- command: cargo test --test end_to_end jdbc
+ name: cargo test --workspace
+ command: cargo test --workspace
+ - cache_save
# end to end tests with Heappy (heap profiling enabled)
@@ -328,7 +325,7 @@ jobs:
- rust_components
- cache_restore
- run:
- name: End to end tests (with heappy)
+ name: cargo test --no-default-features --features=heappy end_to_end
command: cargo test --no-default-features --features=heappy end_to_end
- cache_save
@@ -563,10 +560,10 @@ workflows:
- docs-lint
- test
- test_heappy
+ - test_integration
- build_dev
- doc
- workspace_hack_checks
- - test-flight-sql-jdbc
- build_release:
filters:
branches:
@@ -583,10 +580,10 @@ workflows:
- docs-lint
- test
- test_heappy
+ - test_integration
- build_dev
- build_release
- doc
- - test-flight-sql-jdbc
# Manual build of CI image
ci_image:
|
e1825ec45b8ee6fa1fe3286d2c53e068eac68099
|
Marco Neumann
|
2023-05-04 11:08:46
|
use struct-style selectors in InfluxRPC (#7742)
|
Some clean up before I implement the core logic for #7533.
| null |
refactor: use struct-style selectors in InfluxRPC (#7742)
Some clean up before I implement the core logic for #7533.
|
diff --git a/iox_query_influxrpc/src/lib.rs b/iox_query_influxrpc/src/lib.rs
index 1532e5e039..23fb1bfec7 100644
--- a/iox_query_influxrpc/src/lib.rs
+++ b/iox_query_influxrpc/src/lib.rs
@@ -5,8 +5,11 @@ use data_types::ChunkId;
use datafusion::{
common::DFSchemaRef,
error::DataFusionError,
- logical_expr::{utils::exprlist_to_columns, ExprSchemable, LogicalPlan, LogicalPlanBuilder},
+ logical_expr::{
+ utils::exprlist_to_columns, ExprSchemable, GetIndexedField, LogicalPlan, LogicalPlanBuilder,
+ },
prelude::{when, Column, Expr},
+ scalar::ScalarValue,
};
use datafusion_util::AsExpr;
use futures::{Stream, StreamExt, TryStreamExt};
@@ -35,7 +38,9 @@ use predicate::{
use query_functions::{
group_by::{Aggregate, WindowDuration},
make_window_bound_expr,
- selectors::{selector_first, selector_last, selector_max, selector_min, SelectorOutput},
+ selectors::{
+ struct_selector_first, struct_selector_last, struct_selector_max, struct_selector_min,
+ },
};
use schema::{InfluxColumnType, Projection, Schema, TIME_COLUMN_NAME};
use snafu::{ensure, OptionExt, ResultExt, Snafu};
@@ -1636,7 +1641,6 @@ pub(crate) struct AggExprs {
struct FieldExpr<'a> {
expr: Expr,
name: &'a str,
- datatype: &'a DataType,
}
// Returns an iterator of fields from schema that pass the predicate. If there
@@ -1674,7 +1678,6 @@ fn filtered_fields_iter<'a>(
Some(FieldExpr {
expr: expr.alias(f.name()),
name: f.name(),
- datatype: f.data_type(),
})
})
}
@@ -1735,22 +1738,25 @@ impl AggExprs {
let mut field_list = Vec::new();
for field in filtered_fields_iter(schema, predicate) {
+ let selector = make_selector_expr(agg, field.clone())?;
+
let field_name = field.name;
- agg_exprs.push(make_selector_expr(
- agg,
- SelectorOutput::Value,
- field.clone(),
- field_name,
- )?);
+ agg_exprs.push(
+ Expr::GetIndexedField(GetIndexedField {
+ expr: Box::new(selector.clone()),
+ key: ScalarValue::from("value"),
+ })
+ .alias(field_name),
+ );
let time_column_name = format!("{TIME_COLUMN_NAME}_{field_name}");
-
- agg_exprs.push(make_selector_expr(
- agg,
- SelectorOutput::Time,
- field,
- &time_column_name,
- )?);
+ agg_exprs.push(
+ Expr::GetIndexedField(GetIndexedField {
+ expr: Box::new(selector.clone()),
+ key: ScalarValue::from("time"),
+ })
+ .alias(&time_column_name),
+ );
field_list.push((
Arc::from(field_name), // value name
@@ -1782,7 +1788,6 @@ impl AggExprs {
agg,
FieldExpr {
expr: field.name().as_expr(),
- datatype: field.data_type(),
name: field.name(),
},
)
@@ -1866,23 +1871,16 @@ fn make_agg_expr(agg: Aggregate, field_expr: FieldExpr<'_>) -> Result<Expr> {
/// ELSE NULL
/// END) as col_name
///
-fn make_selector_expr<'a>(
- agg: Aggregate,
- output: SelectorOutput,
- field: FieldExpr<'a>,
- col_name: &'a str,
-) -> Result<Expr> {
+fn make_selector_expr(agg: Aggregate, field: FieldExpr<'_>) -> Result<Expr> {
let uda = match agg {
- Aggregate::First => selector_first(field.datatype, output),
- Aggregate::Last => selector_last(field.datatype, output),
- Aggregate::Min => selector_min(field.datatype, output),
- Aggregate::Max => selector_max(field.datatype, output),
+ Aggregate::First => struct_selector_first(),
+ Aggregate::Last => struct_selector_last(),
+ Aggregate::Min => struct_selector_min(),
+ Aggregate::Max => struct_selector_max(),
_ => return InternalAggregateNotSelectorSnafu { agg }.fail(),
};
- Ok(uda
- .call(vec![field.expr, TIME_COLUMN_NAME.as_expr()])
- .alias(col_name))
+ Ok(uda.call(vec![field.expr, TIME_COLUMN_NAME.as_expr()]))
}
/// Orders chunks so it is likely that the ones that already have cached data are pulled first.
|
b1cdb928f65d3ab9bf46c6aa1435b7200837e585
|
Dom Dwyer
|
2023-08-03 16:59:06
|
always log error message
|
Always log the actual error as it may change.
| null |
refactor: always log error message
Always log the actual error as it may change.
|
diff --git a/router/src/gossip/traits.rs b/router/src/gossip/traits.rs
index 9c90752853..77d656ede5 100644
--- a/router/src/gossip/traits.rs
+++ b/router/src/gossip/traits.rs
@@ -18,11 +18,8 @@ pub trait SchemaBroadcast: Send + Sync + Debug {
#[async_trait]
impl SchemaBroadcast for Arc<gossip::GossipHandle> {
async fn broadcast(&self, payload: Vec<u8>) {
- if gossip::GossipHandle::broadcast(self, payload)
- .await
- .is_err()
- {
- error!("payload size exceeds maximum allowed");
+ if let Err(e) = gossip::GossipHandle::broadcast(self, payload).await {
+ error!(error=%e, "failed to broadcast payload");
}
}
}
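A small hedged sketch of the same logging pattern with a hypothetical fallible call: in the `tracing` macros re-exported by `observability_deps`, `%err` records the field via `Display` (and `?err` via `Debug`), so the concrete error text ends up in the log line instead of a fixed message.

use observability_deps::tracing::error;

async fn broadcast_or_log(payload: Vec<u8>) {
    if let Err(err) = send_somewhere(payload).await {
        // Capture the error via Display so the message stays useful if it changes.
        error!(error = %err, "failed to broadcast payload");
    }
}

// Hypothetical stand-in for the real broadcast call.
async fn send_somewhere(_payload: Vec<u8>) -> Result<(), std::io::Error> {
    Ok(())
}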
|
15cff11b08cf3acc8804266afaa80f5f52daa781
|
Dom Dwyer
|
2022-12-20 15:01:10
|
explicit worker module
|
Separate out persist worker types & routines into a separate worker
module rather than commingling them with the persist handle, and rename
the unimaginative "inner" to reflect the actual usage.
| null |
refactor(persist): explicit worker module
Separate out persist worker types & routines into a separate worker
module rather than commingling them with the persist handle, and rename
the unimaginative "inner" to reflect the actual usage.
|
diff --git a/ingester2/src/persist/context.rs b/ingester2/src/persist/context.rs
index 7b54672488..340e1687f1 100644
--- a/ingester2/src/persist/context.rs
+++ b/ingester2/src/persist/context.rs
@@ -28,7 +28,7 @@ use crate::{
persist::compact::{compact_persisting_batch, CompactedStream},
};
-use super::handle::Inner;
+use super::worker::SharedWorkerState;
/// Errors a persist can experience.
#[derive(Debug, Error)]
@@ -81,7 +81,8 @@ impl PersistRequest {
pub(super) struct Context {
partition: Arc<Mutex<PartitionData>>,
data: PersistingData,
- inner: Arc<Inner>,
+ worker_state: Arc<SharedWorkerState>,
+
/// IDs loaded from the partition at construction time.
namespace_id: NamespaceId,
table_id: TableId,
@@ -134,7 +135,7 @@ impl Context {
///
/// Locks the [`PartitionData`] in `req` to read various properties which
/// are then cached in the [`Context`].
- pub(super) fn new(req: PersistRequest, inner: Arc<Inner>) -> Self {
+ pub(super) fn new(req: PersistRequest, worker_state: Arc<SharedWorkerState>) -> Self {
let partition_id = req.data.partition_id();
// Obtain the partition lock and load the immutable values that will be
@@ -156,7 +157,7 @@ impl Context {
Self {
partition,
data,
- inner,
+ worker_state,
namespace_id: guard.namespace_id(),
table_id: guard.table_id(),
partition_id,
@@ -208,7 +209,7 @@ impl Context {
// This demands the deferred load values and may have to wait for them
// to be loaded before compaction starts.
compact_persisting_batch(
- &self.inner.exec,
+ &self.worker_state.exec,
sort_key,
self.table_name.get().await,
self.data.query_adaptor(),
@@ -263,7 +264,7 @@ impl Context {
//
// This call retries until it completes.
let (md, file_size) = self
- .inner
+ .worker_state
.store
.upload(record_stream, &iox_metadata)
.await
@@ -285,7 +286,7 @@ impl Context {
// -> column IDs.
let table_schema = Backoff::new(&Default::default())
.retry_all_errors("get table schema", || async {
- let mut repos = self.inner.catalog.repositories().await;
+ let mut repos = self.worker_state.catalog.repositories().await;
get_table_schema_by_id(self.table_id, repos.as_mut()).await
})
.await
@@ -329,7 +330,7 @@ impl Context {
.retry_with_backoff("cas_sort_key", || {
let old_sort_key = old_sort_key.clone();
let new_sort_key_str = new_sort_key.to_columns().collect::<Vec<_>>();
- let catalog = Arc::clone(&self.inner.catalog);
+ let catalog = Arc::clone(&self.worker_state.catalog);
async move {
let mut repos = catalog.repositories().await;
@@ -461,7 +462,7 @@ impl Context {
// parquet file by polling / querying the catalog.
Backoff::new(&Default::default())
.retry_all_errors("add parquet file to catalog", || async {
- let mut repos = self.inner.catalog.repositories().await;
+ let mut repos = self.worker_state.catalog.repositories().await;
let parquet_file = repos
.parquet_files()
.create(parquet_table_data.clone())
diff --git a/ingester2/src/persist/handle.rs b/ingester2/src/persist/handle.rs
index aebbb218de..649406ea83 100644
--- a/ingester2/src/persist/handle.rs
+++ b/ingester2/src/persist/handle.rs
@@ -1,7 +1,5 @@
use std::sync::Arc;
-use async_channel::RecvError;
-use data_types::ParquetFileParams;
use iox_catalog::interface::Catalog;
use iox_query::{exec::Executor, QueryChunkMeta};
use observability_deps::tracing::*;
@@ -14,13 +12,13 @@ use tokio::{
time::Instant,
};
-use crate::buffer_tree::partition::{persisting::PersistingData, PartitionData, SortKeyState};
-
-use super::{
- backpressure::PersistState,
- context::{Context, PersistError, PersistRequest},
+use crate::{
+ buffer_tree::partition::{persisting::PersistingData, PartitionData, SortKeyState},
+ persist::worker,
};
+use super::{backpressure::PersistState, context::PersistRequest, worker::SharedWorkerState};
+
/// A persistence task submission handle.
///
/// This type is cheap to clone to share across threads.
@@ -115,7 +113,7 @@ use super::{
#[derive(Debug, Clone)]
pub(crate) struct PersistHandle {
/// THe state/dependencies shared across all worker tasks.
- inner: Arc<Inner>,
+ worker_state: Arc<SharedWorkerState>,
/// Task handles for the worker tasks, aborted on drop of all
/// [`PersistHandle`] instances.
@@ -172,7 +170,7 @@ impl PersistHandle {
// Log the important configuration parameters of the persist subsystem.
info!(n_workers, persist_queue_depth, "initialised persist task");
- let inner = Arc::new(Inner {
+ let worker_state = Arc::new(SharedWorkerState {
exec,
store,
catalog,
@@ -186,14 +184,18 @@ impl PersistHandle {
let (tx_handles, tasks): (Vec<_>, Vec<_>) = (0..n_workers)
.map(|_| {
- let inner = Arc::clone(&inner);
+ let worker_state = Arc::clone(&worker_state);
// Initialise the worker queue that is not shared across workers
// allowing the persist code to address a single worker.
let (tx, rx) = mpsc::unbounded_channel();
(
tx,
- AbortOnDrop(tokio::spawn(run_task(inner, global_rx.clone(), rx))),
+ AbortOnDrop(tokio::spawn(worker::run_task(
+ worker_state,
+ global_rx.clone(),
+ rx,
+ ))),
)
})
.unzip();
@@ -210,7 +212,7 @@ impl PersistHandle {
(
Self {
- inner,
+ worker_state,
sem,
global_queue: global_tx,
worker_queues: Arc::new(JumpHash::new(tx_handles)),
@@ -372,131 +374,6 @@ impl<T> Drop for AbortOnDrop<T> {
}
}
-#[derive(Debug)]
-pub(super) struct Inner {
- pub(super) exec: Arc<Executor>,
- pub(super) store: ParquetStorage,
- pub(super) catalog: Arc<dyn Catalog>,
-}
-
-/// Drive a [`PersistRequest`] to completion, prioritising jobs from the
-/// worker-specific queue, and falling back to jobs from the global work queue.
-///
-/// Optimistically compacts the [`PersistingData`] using the locally cached sort
-/// key read from the [`PartitionData`] instance. If this key proves to be
-/// stale, the compaction is retried with the new key.
-///
-/// See <https://github.com/influxdata/influxdb_iox/issues/6439>.
-///
-/// ```text
-/// ┌───────┐
-/// │COMPACT│
-/// └───┬───┘
-/// ┌───▽──┐
-/// │UPLOAD│
-/// └───┬──┘
-/// _______▽________ ┌────────────────┐
-/// ╱ ╲ │TRY UPDATE │
-/// ╱ NEEDS CATALOG ╲___│CATALOG SORT KEY│
-/// ╲ SORT KEY UPDATE? ╱yes└────────┬───────┘
-/// ╲________________╱ _______▽________ ┌────────────┐
-/// │no ╱ ╲ │RESTART WITH│
-/// │ ╱ SAW CONCURRENT ╲___│NEW SORT KEY│
-/// │ ╲ SORT KEY UPDATE? ╱yes└────────────┘
-/// │ ╲________________╱
-/// │ │no
-/// └─────┬────────────────┘
-/// ┌─────▽─────┐
-/// │ADD PARQUET│
-/// │TO CATALOG │
-/// └─────┬─────┘
-/// ┌───────▽──────┐
-/// │NOTIFY PERSIST│
-/// │JOB COMPLETE │
-/// └──────────────┘
-/// ```
-async fn run_task(
- inner: Arc<Inner>,
- global_queue: async_channel::Receiver<PersistRequest>,
- mut rx: mpsc::UnboundedReceiver<PersistRequest>,
-) {
- loop {
- let req = tokio::select! {
- // Bias the channel polling to prioritise work in the
- // worker-specific queue.
- //
- // This causes the worker to do the work assigned to it specifically
- // first, falling back to taking jobs from the global queue if it
- // has no assigned work.
- //
- // This allows persist jobs to be reordered w.r.t the order in which
- // they were enqueued with queue_persist().
- biased;
-
- v = rx.recv() => {
- match v {
- Some(v) => v,
- None => {
- // The worker channel is closed.
- return
- }
- }
- }
- v = global_queue.recv() => {
- match v {
- Ok(v) => v,
- Err(RecvError) => {
- // The global channel is closed.
- return
- },
- }
- }
- };
-
- let ctx = Context::new(req, Arc::clone(&inner));
-
- // Compact the data, generate the parquet file from the result, and
- // upload it to object storage.
- //
- // If this process generated a new sort key that must be added to the
- // catalog, attempt to update the catalog with a compare-and-swap
- // operation; if this update fails due to a concurrent sort key update,
- // the compaction must be redone with the new sort key and uploaded
- // before continuing.
- let parquet_table_data = loop {
- match compact_and_upload(&ctx).await {
- Ok(v) => break v,
- Err(PersistError::ConcurrentSortKeyUpdate) => continue,
- };
- };
-
- // Make the newly uploaded parquet file visible to other nodes.
- let object_store_id = ctx.update_catalog_parquet(parquet_table_data).await;
- // And finally mark the persist job as complete and notify any
- // observers.
- ctx.mark_complete(object_store_id);
- }
-}
-
-/// Run a compaction on the [`PersistingData`], generate a parquet file and
-/// upload it to object storage.
-///
-/// If in the course of this the sort key is updated, this function attempts to
-/// update the sort key in the catalog. This MAY fail because another node has
-/// concurrently done the same and the persist must be restarted, see
-/// <https://github.com/influxdata/influxdb_iox/issues/6439>.
-async fn compact_and_upload(ctx: &Context) -> Result<ParquetFileParams, PersistError> {
- let compacted = ctx.compact().await;
- let (sort_key_update, parquet_table_data) = ctx.upload(compacted).await;
-
- if let Some(update) = sort_key_update {
- ctx.update_catalog_sort_key(update, parquet_table_data.object_store_id)
- .await?
- }
-
- Ok(parquet_table_data)
-}
-
#[cfg(test)]
mod tests {
use std::{sync::Arc, time::Duration};
diff --git a/ingester2/src/persist/mod.rs b/ingester2/src/persist/mod.rs
index 726bfe7de4..b7f719003d 100644
--- a/ingester2/src/persist/mod.rs
+++ b/ingester2/src/persist/mod.rs
@@ -3,3 +3,4 @@ pub(super) mod compact;
mod context;
pub(crate) mod handle;
pub(crate) mod hot_partitions;
+mod worker;
diff --git a/ingester2/src/persist/worker.rs b/ingester2/src/persist/worker.rs
index 20c843d8cb..d1a0f833e5 100644
--- a/ingester2/src/persist/worker.rs
+++ b/ingester2/src/persist/worker.rs
@@ -3,26 +3,13 @@ use std::sync::Arc;
use async_channel::RecvError;
use data_types::ParquetFileParams;
use iox_catalog::interface::Catalog;
-use iox_query::{exec::Executor, QueryChunkMeta};
-use observability_deps::tracing::*;
-use parking_lot::Mutex;
+use iox_query::exec::Executor;
+
use parquet_file::storage::ParquetStorage;
-use schema::sort::adjust_sort_key_columns;
-use sharder::JumpHash;
-use tokio::{
- sync::{mpsc, oneshot, Semaphore, TryAcquireError},
- time::Instant,
-};
-use crate::{
- buffer_tree::partition::{persisting::PersistingData, PartitionData, SortKeyState},
- persist::worker,
-};
+use tokio::sync::mpsc;
-use super::{
- backpressure::PersistState,
- context::{Context, PersistError, PersistRequest},
-};
+use super::context::{Context, PersistError, PersistRequest};
/// State shared across workers.
#[derive(Debug)]
@@ -69,6 +56,10 @@ pub(super) struct SharedWorkerState {
/// │JOB COMPLETE │
/// └──────────────┘
/// ```
+///
+/// [`PersistingData`]:
+/// crate::buffer_tree::partition::persisting::PersistingData
+/// [`PartitionData`]: crate::buffer_tree::partition::PartitionData
pub(super) async fn run_task(
worker_state: Arc<SharedWorkerState>,
global_queue: async_channel::Receiver<PersistRequest>,
@@ -137,10 +128,12 @@ pub(super) async fn run_task(
///
/// If in the course of this the sort key is updated, this function attempts to
/// update the sort key in the catalog. This MAY fail because another node has
-/// concurrently done the same and the persist must be restarted, see:
+/// concurrently done the same and the persist must be restarted.
///
-/// https://github.com/influxdata/influxdb_iox/issues/6439
+/// See <https://github.com/influxdata/influxdb_iox/issues/6439>.
///
+/// [`PersistingData`]:
+/// crate::buffer_tree::partition::persisting::PersistingData
async fn compact_and_upload(ctx: &Context) -> Result<ParquetFileParams, PersistError> {
let compacted = ctx.compact().await;
let (sort_key_update, parquet_table_data) = ctx.upload(compacted).await;
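The worker loop moved into `worker.rs` above prioritises its per-worker queue over the shared global queue via a `biased` `tokio::select!`. A stripped-down sketch of that pattern, with a hypothetical `Job` type in place of `PersistRequest`:

use tokio::sync::mpsc;

struct Job;

async fn run_worker(
    mut local: mpsc::UnboundedReceiver<Job>,
    global: async_channel::Receiver<Job>,
) {
    loop {
        let job = tokio::select! {
            // Poll branches in declaration order: drain the worker-specific
            // queue first, fall back to stealing from the global queue.
            biased;

            v = local.recv() => match v {
                Some(v) => v,
                None => return, // worker-specific queue closed
            },
            v = global.recv() => match v {
                Ok(v) => v,
                Err(async_channel::RecvError) => return, // global queue closed
            },
        };

        process(job).await;
    }
}

async fn process(_job: Job) { /* compact, upload, update catalog, ... */ }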
|
7729494f61fbdaed8d6c9a18b5726ea64b811bb3
|
Dom Dwyer
|
2022-10-18 19:37:12
|
write, query & progress API coverage
|
This commit adds a new test that exercises all major external APIs of
the ingester:
* Writing data via the write buffer
* Waiting for data to be readable via the progress API
* Querying data and asserting the contents
This should provide basic integration coverage for the Ingester
internals. This commit also removes a similar test (though with less
coverage) that was tightly coupled to the existing buffering structures.
| null |
test: write, query & progress API coverage
This commit adds a new test that exercises all major external APIs of
the ingester:
* Writing data via the write buffer
* Waiting for data to be readable via the progress API
* Querying data and asserting the contents
This should provide basic integration coverage for the Ingester
internals. This commit also removes a similar test (though with less
coverage) that was tightly coupled to the existing buffering structures.
|
diff --git a/ingester/src/handler.rs b/ingester/src/handler.rs
index 5c3ae23fba..27d378c09d 100644
--- a/ingester/src/handler.rs
+++ b/ingester/src/handler.rs
@@ -435,11 +435,10 @@ impl<T> Drop for IngestHandlerImpl<T> {
mod tests {
use std::{num::NonZeroU32, ops::DerefMut};
- use data_types::{Namespace, NamespaceSchema, QueryPool, Sequence, SequenceNumber};
+ use data_types::{Namespace, NamespaceSchema, Sequence, SequenceNumber};
use dml::{DmlMeta, DmlWrite};
use iox_catalog::{mem::MemCatalog, validate_or_insert_schema};
use iox_time::Time;
- use metric::{Attributes, Metric, U64Counter, U64Gauge};
use mutable_batch_lp::lines_to_batches;
use object_store::memory::InMemory;
use test_helpers::maybe_start_logging;
@@ -448,179 +447,9 @@ mod tests {
use super::*;
use crate::data::{partition::SnapshotBatch, table::TableName};
- #[tokio::test]
- async fn read_from_write_buffer_write_to_mutable_buffer() {
- let ingester = TestIngester::new().await;
-
- let schema = NamespaceSchema::new(
- ingester.namespace.id,
- ingester.topic.id,
- ingester.query_pool.id,
- 100,
- );
- let mut txn = ingester.catalog.start_transaction().await.unwrap();
- let ingest_ts1 = Time::from_timestamp_millis(42);
- let ingest_ts2 = Time::from_timestamp_millis(1337);
- let w1 = DmlWrite::new(
- "foo",
- lines_to_batches("mem foo=1 10", 0).unwrap(),
- Some("1970-01-01".into()),
- DmlMeta::sequenced(
- Sequence::new(ShardIndex::new(0), SequenceNumber::new(0)),
- ingest_ts1,
- None,
- 50,
- ),
- );
- let schema = validate_or_insert_schema(w1.tables(), &schema, txn.deref_mut())
- .await
- .unwrap()
- .unwrap();
- ingester.write_buffer_state.push_write(w1);
- let w2 = DmlWrite::new(
- "foo",
- lines_to_batches("cpu bar=2 20\ncpu bar=3 30", 0).unwrap(),
- Some("1970-01-01".into()),
- DmlMeta::sequenced(
- Sequence::new(ShardIndex::new(0), SequenceNumber::new(7)),
- ingest_ts2,
- None,
- 150,
- ),
- );
- let _schema = validate_or_insert_schema(w2.tables(), &schema, txn.deref_mut())
- .await
- .unwrap()
- .unwrap();
- ingester.write_buffer_state.push_write(w2);
- let w3 = DmlWrite::new(
- "foo",
- lines_to_batches("a b=2 200", 0).unwrap(),
- Some("1970-01-01".into()),
- DmlMeta::sequenced(
- Sequence::new(ShardIndex::new(0), SequenceNumber::new(9)),
- ingest_ts2,
- None,
- 150,
- ),
- );
- let _schema = validate_or_insert_schema(w3.tables(), &schema, txn.deref_mut())
- .await
- .unwrap()
- .unwrap();
- ingester.write_buffer_state.push_write(w3);
- txn.commit().await.unwrap();
-
- // give the writes some time to go through the buffer. Exit once we've verified there's
- // data in there from both writes.
- tokio::time::timeout(Duration::from_secs(2), async {
- let ns_name = ingester.namespace.name.into();
- let table_name = TableName::from("a");
- loop {
- let mut has_measurement = false;
-
- if let Some(data) = ingester.ingester.data.shard(ingester.shard.id) {
- if let Some(data) = data.namespace(&ns_name) {
- // verify there's data in the buffer
- if let Some((b, _)) = data.snapshot(&table_name, &"1970-01-01".into()).await
- {
- if let Some(b) = b.first() {
- if b.data.num_rows() > 0 {
- has_measurement = true;
- }
- }
- }
- }
- }
-
- // and ensure that the shard state was actually updated
- let shard = ingester
- .catalog
- .repositories()
- .await
- .shards()
- .create_or_get(&ingester.topic, ingester.shard_index)
- .await
- .unwrap();
-
- if has_measurement
- && shard.min_unpersisted_sequence_number == SequenceNumber::new(9)
- {
- break;
- }
-
- tokio::time::sleep(Duration::from_millis(200)).await;
- }
- })
- .await
- .expect("timeout");
-
- let observation = ingester
- .metrics
- .get_instrument::<Metric<DurationHistogram>>("ingester_op_apply_duration")
- .unwrap()
- .get_observer(&Attributes::from(&[
- ("kafka_topic", "whatevs"),
- ("kafka_partition", "0"),
- ("result", "success"),
- ]))
- .unwrap()
- .fetch();
- let hits = observation.buckets.iter().map(|b| b.count).sum::<u64>();
- assert_eq!(hits, 3);
-
- let observation = ingester
- .metrics
- .get_instrument::<Metric<U64Counter>>("ingester_write_buffer_read_bytes")
- .unwrap()
- .get_observer(&Attributes::from(&[
- ("kafka_topic", "whatevs"),
- ("kafka_partition", "0"),
- ]))
- .unwrap()
- .fetch();
- assert_eq!(observation, 350);
-
- let observation = ingester
- .metrics
- .get_instrument::<Metric<U64Gauge>>("ingester_write_buffer_last_sequence_number")
- .unwrap()
- .get_observer(&Attributes::from(&[
- ("kafka_topic", "whatevs"),
- ("kafka_partition", "0"),
- ]))
- .unwrap()
- .fetch();
- assert_eq!(observation, 9);
-
- let observation = ingester
- .metrics
- .get_instrument::<Metric<U64Gauge>>("ingester_write_buffer_sequence_number_lag")
- .unwrap()
- .get_observer(&Attributes::from(&[
- ("kafka_topic", "whatevs"),
- ("kafka_partition", "0"),
- ]))
- .unwrap()
- .fetch();
- assert_eq!(observation, 0);
-
- let observation = ingester
- .metrics
- .get_instrument::<Metric<U64Gauge>>("ingester_write_buffer_last_ingest_ts")
- .unwrap()
- .get_observer(&Attributes::from(&[
- ("kafka_topic", "whatevs"),
- ("kafka_partition", "0"),
- ]))
- .unwrap()
- .fetch();
- assert_eq!(observation, ingest_ts2.timestamp_nanos() as u64);
- }
-
#[tokio::test]
async fn test_shutdown() {
- let ingester = TestIngester::new().await.ingester;
+ let (ingester, _, _) = ingester_test_setup(vec![], 0, true).await;
// does not exit w/o shutdown
tokio::select! {
@@ -638,7 +467,7 @@ mod tests {
#[tokio::test]
#[should_panic(expected = "Background worker 'bad_task' exited early!")]
async fn test_join_task_early_shutdown() {
- let mut ingester = TestIngester::new().await.ingester;
+ let (mut ingester, _, _) = ingester_test_setup(vec![], 0, true).await;
let shutdown_task = tokio::spawn(async {
// It does nothing! and stops.
@@ -655,7 +484,7 @@ mod tests {
#[tokio::test]
#[should_panic(expected = "JoinError::Panic")]
async fn test_join_task_panic() {
- let mut ingester = TestIngester::new().await.ingester;
+ let (mut ingester, _, _) = ingester_test_setup(vec![], 0, true).await;
let shutdown_task = tokio::spawn(async {
panic!("bananas");
@@ -946,7 +775,7 @@ mod tests {
#[tokio::test]
async fn limits_concurrent_queries() {
- let mut ingester = TestIngester::new().await;
+ let (mut ingester, _, _) = ingester_test_setup(vec![], 0, true).await;
let request = IngesterQueryRequest {
namespace: "foo".to_string(),
table: "cpu".to_string(),
@@ -954,93 +783,14 @@ mod tests {
predicate: None,
};
- let res = ingester.ingester.query(request.clone()).await.unwrap_err();
+ let res = ingester.query(request.clone()).await.unwrap_err();
assert!(matches!(
res,
crate::querier_handler::Error::NamespaceNotFound { .. }
));
- ingester.ingester.request_sem = Semaphore::new(0);
- let res = ingester.ingester.query(request).await.unwrap_err();
+ ingester.request_sem = Semaphore::new(0);
+ let res = ingester.query(request).await.unwrap_err();
assert!(matches!(res, crate::querier_handler::Error::RequestLimit));
}
-
- struct TestIngester {
- catalog: Arc<dyn Catalog>,
- shard: Shard,
- namespace: Namespace,
- topic: TopicMetadata,
- shard_index: ShardIndex,
- query_pool: QueryPool,
- metrics: Arc<metric::Registry>,
- write_buffer_state: MockBufferSharedState,
- ingester: IngestHandlerImpl,
- }
-
- impl TestIngester {
- async fn new() -> Self {
- let metrics: Arc<metric::Registry> = Default::default();
- let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(Arc::clone(&metrics)));
-
- let mut txn = catalog.start_transaction().await.unwrap();
- let topic = txn.topics().create_or_get("whatevs").await.unwrap();
- let query_pool = txn.query_pools().create_or_get("whatevs").await.unwrap();
- let shard_index = ShardIndex::new(0);
- let namespace = txn
- .namespaces()
- .create("foo", "inf", topic.id, query_pool.id)
- .await
- .unwrap();
- let shard = txn
- .shards()
- .create_or_get(&topic, shard_index)
- .await
- .unwrap();
- txn.commit().await.unwrap();
-
- let mut shard_states = BTreeMap::new();
- shard_states.insert(shard_index, shard);
-
- let write_buffer_state =
- MockBufferSharedState::empty_with_n_shards(NonZeroU32::try_from(1).unwrap());
- let reading: Arc<dyn WriteBufferReading> =
- Arc::new(MockBufferForReading::new(write_buffer_state.clone(), None).unwrap());
- let object_store = Arc::new(InMemory::new());
-
- let lifecycle_config = LifecycleConfig::new(
- 1000000,
- 1000,
- 1000,
- Duration::from_secs(10),
- Duration::from_secs(10),
- 10000000,
- );
- let ingester = IngestHandlerImpl::new(
- lifecycle_config,
- topic.clone(),
- shard_states,
- Arc::clone(&catalog),
- object_store,
- reading,
- Arc::new(Executor::new(1)),
- Arc::clone(&metrics),
- false,
- 1,
- )
- .await
- .unwrap();
-
- Self {
- catalog,
- shard,
- namespace,
- topic,
- shard_index,
- query_pool,
- metrics,
- write_buffer_state,
- ingester,
- }
- }
- }
}
diff --git a/ingester/tests/write.rs b/ingester/tests/write.rs
new file mode 100644
index 0000000000..243cb75ea2
--- /dev/null
+++ b/ingester/tests/write.rs
@@ -0,0 +1,119 @@
+mod common;
+
+use arrow_util::assert_batches_sorted_eq;
+pub use common::*;
+use data_types::PartitionKey;
+use generated_types::ingester::IngesterQueryRequest;
+use iox_time::{SystemProvider, TimeProvider};
+use metric::{DurationHistogram, U64Counter, U64Gauge};
+
+// Write data to an ingester through the write buffer interface, utilise the
+// progress API to wait for it to become readable, and finally query the data
+// and validate the contents.
+#[tokio::test]
+async fn test_write_query() {
+ let mut ctx = TestContext::new().await;
+
+ ctx.ensure_namespace("test_namespace").await;
+
+ // Initial write
+ let partition_key = PartitionKey::from("1970-01-01");
+ ctx.write_lp(
+ "test_namespace",
+ "bananas greatness=\"unbounded\" 10",
+ partition_key.clone(),
+ 0,
+ )
+ .await;
+
+ // A subsequent write with a non-contiguous sequence number to a different table.
+ ctx.write_lp(
+ "test_namespace",
+ "cpu bar=2 20\ncpu bar=3 30",
+ partition_key.clone(),
+ 7,
+ )
+ .await;
+
+ // And a third write that appends more data to the table in the initial
+ // write.
+ let offset = ctx
+ .write_lp(
+ "test_namespace",
+ "bananas count=42 200",
+ partition_key.clone(),
+ 42,
+ )
+ .await;
+
+ ctx.wait_for_readable(offset).await;
+
+ // Perform a query to validate the actual data buffered.
+ let data = ctx
+ .query(IngesterQueryRequest {
+ namespace: "test_namespace".to_string(),
+ table: "bananas".to_string(),
+ columns: vec![],
+ predicate: None,
+ })
+ .await
+ .expect("query should succeed")
+ .into_record_batches()
+ .await;
+
+ let expected = vec![
+ "+-------+-----------+--------------------------------+",
+ "| count | greatness | time |",
+ "+-------+-----------+--------------------------------+",
+ "| | unbounded | 1970-01-01T00:00:00.000000010Z |",
+ "| 42 | | 1970-01-01T00:00:00.000000200Z |",
+ "+-------+-----------+--------------------------------+",
+ ];
+ assert_batches_sorted_eq!(&expected, &data);
+
+ // Assert various ingest metrics.
+ let hist = ctx
+ .get_metric::<DurationHistogram, _>(
+ "ingester_op_apply_duration",
+ &[
+ ("kafka_topic", TEST_TOPIC_NAME),
+ ("kafka_partition", "0"),
+ ("result", "success"),
+ ],
+ )
+ .fetch();
+ assert_eq!(hist.sample_count(), 3);
+
+ let metric = ctx
+ .get_metric::<U64Counter, _>(
+ "ingester_write_buffer_read_bytes",
+ &[("kafka_topic", TEST_TOPIC_NAME), ("kafka_partition", "0")],
+ )
+ .fetch();
+ assert_eq!(metric, 150);
+
+ let metric = ctx
+ .get_metric::<U64Gauge, _>(
+ "ingester_write_buffer_last_sequence_number",
+ &[("kafka_topic", TEST_TOPIC_NAME), ("kafka_partition", "0")],
+ )
+ .fetch();
+ assert_eq!(metric, 42);
+
+ let metric = ctx
+ .get_metric::<U64Gauge, _>(
+ "ingester_write_buffer_sequence_number_lag",
+ &[("kafka_topic", TEST_TOPIC_NAME), ("kafka_partition", "0")],
+ )
+ .fetch();
+ assert_eq!(metric, 0);
+
+ let metric = ctx
+ .get_metric::<U64Gauge, _>(
+ "ingester_write_buffer_last_ingest_ts",
+ &[("kafka_topic", TEST_TOPIC_NAME), ("kafka_partition", "0")],
+ )
+ .fetch();
+ let now = SystemProvider::new().now();
+ assert!(metric < now.timestamp_nanos() as _);
+}
|
4ca869fcd9a6a863b3ebcb4a18c9fbbf117152ea
|
Marco Neumann
|
2022-10-24 07:19:51
|
textwrap 0.15.1 was yanked (#5953)
|
Need to pull an unreleased clap v3 so we no longer depend on a yanked
version of textwrap. Also see https://github.com/clap-rs/clap/issues/4418
| null |
fix: textwrap 0.15.1 was yanked (#5953)
Need to pull an unreleased clap v3 so we no longer depend on a yanked
version of textwrap. Also see https://github.com/clap-rs/clap/issues/4418
|
diff --git a/Cargo.lock b/Cargo.lock
index 9a95262f68..92e455cb1d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -610,8 +610,7 @@ dependencies = [
[[package]]
name = "clap"
version = "3.2.22"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86447ad904c7fb335a790c9d7fe3d0d971dc523b8ccd1561a520de9a85302750"
+source = "git+https://github.com/crepererum/clap.git?branch=crepererum/issue4418#6ca8aca7ba656f06358440e95ada08019073e1a5"
dependencies = [
"bitflags",
"clap_lex 0.2.4",
@@ -676,8 +675,7 @@ dependencies = [
[[package]]
name = "clap_lex"
version = "0.2.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
+source = "git+https://github.com/crepererum/clap.git?branch=crepererum/issue4418#6ca8aca7ba656f06358440e95ada08019073e1a5"
dependencies = [
"os_str_bytes",
]
@@ -5089,9 +5087,9 @@ dependencies = [
[[package]]
name = "textwrap"
-version = "0.15.1"
+version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "949517c0cf1bf4ee812e2e07e08ab448e3ae0d23472aee8a06c985f0c8815b16"
+checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
[[package]]
name = "thiserror"
diff --git a/Cargo.toml b/Cargo.toml
index d339a8d499..c695314888 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -126,3 +126,7 @@ opt-level = 3
[profile.dev.package.similar]
opt-level = 3
+
+[patch.crates-io]
+# See https://github.com/clap-rs/clap/issues/4418
+clap3 = { git = "https://github.com/crepererum/clap.git", branch = "crepererum/issue4418", package = "clap" }
|
3eb48ef210002aa32410aa2920633b787e7eeb62
|
Andrew Lamb
|
2023-07-21 07:20:36
|
Update datafusion again (#8247)
|
* chore: Update datafusion to get new grouping
* chore: Update for new API
* chore: update tests
* fix: new API
* fix: state type
---------
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
chore: Update datafusion again (#8247)
* chore: Update datafusion to get new grouping
* chore: Update for new API
* chore: update tests
* fix: new API
* fix: state type
---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/Cargo.lock b/Cargo.lock
index 26b00fb984..c579c2d9a5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1367,7 +1367,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "27.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=46182c894e5106adba7fb53e9848ce666fb6129b#46182c894e5106adba7fb53e9848ce666fb6129b"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=44008d71180f2d03e9d21944788e61cb8845abc7#44008d71180f2d03e9d21944788e61cb8845abc7"
dependencies = [
"ahash",
"arrow",
@@ -1415,7 +1415,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "27.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=46182c894e5106adba7fb53e9848ce666fb6129b#46182c894e5106adba7fb53e9848ce666fb6129b"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=44008d71180f2d03e9d21944788e61cb8845abc7#44008d71180f2d03e9d21944788e61cb8845abc7"
dependencies = [
"arrow",
"arrow-array",
@@ -1429,7 +1429,7 @@ dependencies = [
[[package]]
name = "datafusion-execution"
version = "27.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=46182c894e5106adba7fb53e9848ce666fb6129b#46182c894e5106adba7fb53e9848ce666fb6129b"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=44008d71180f2d03e9d21944788e61cb8845abc7#44008d71180f2d03e9d21944788e61cb8845abc7"
dependencies = [
"dashmap",
"datafusion-common",
@@ -1446,7 +1446,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "27.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=46182c894e5106adba7fb53e9848ce666fb6129b#46182c894e5106adba7fb53e9848ce666fb6129b"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=44008d71180f2d03e9d21944788e61cb8845abc7#44008d71180f2d03e9d21944788e61cb8845abc7"
dependencies = [
"ahash",
"arrow",
@@ -1460,7 +1460,7 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
version = "27.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=46182c894e5106adba7fb53e9848ce666fb6129b#46182c894e5106adba7fb53e9848ce666fb6129b"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=44008d71180f2d03e9d21944788e61cb8845abc7#44008d71180f2d03e9d21944788e61cb8845abc7"
dependencies = [
"arrow",
"async-trait",
@@ -1477,7 +1477,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "27.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=46182c894e5106adba7fb53e9848ce666fb6129b#46182c894e5106adba7fb53e9848ce666fb6129b"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=44008d71180f2d03e9d21944788e61cb8845abc7#44008d71180f2d03e9d21944788e61cb8845abc7"
dependencies = [
"ahash",
"arrow",
@@ -1498,6 +1498,7 @@ dependencies = [
"itertools 0.11.0",
"lazy_static",
"libc",
+ "log",
"md-5",
"paste",
"petgraph",
@@ -1511,7 +1512,7 @@ dependencies = [
[[package]]
name = "datafusion-proto"
version = "27.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=46182c894e5106adba7fb53e9848ce666fb6129b#46182c894e5106adba7fb53e9848ce666fb6129b"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=44008d71180f2d03e9d21944788e61cb8845abc7#44008d71180f2d03e9d21944788e61cb8845abc7"
dependencies = [
"arrow",
"chrono",
@@ -1525,7 +1526,7 @@ dependencies = [
[[package]]
name = "datafusion-row"
version = "27.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=46182c894e5106adba7fb53e9848ce666fb6129b#46182c894e5106adba7fb53e9848ce666fb6129b"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=44008d71180f2d03e9d21944788e61cb8845abc7#44008d71180f2d03e9d21944788e61cb8845abc7"
dependencies = [
"arrow",
"datafusion-common",
@@ -1536,7 +1537,7 @@ dependencies = [
[[package]]
name = "datafusion-sql"
version = "27.0.0"
-source = "git+https://github.com/apache/arrow-datafusion.git?rev=46182c894e5106adba7fb53e9848ce666fb6129b#46182c894e5106adba7fb53e9848ce666fb6129b"
+source = "git+https://github.com/apache/arrow-datafusion.git?rev=44008d71180f2d03e9d21944788e61cb8845abc7#44008d71180f2d03e9d21944788e61cb8845abc7"
dependencies = [
"arrow",
"arrow-schema",
diff --git a/Cargo.toml b/Cargo.toml
index 4f38b08896..95909dfe86 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -121,8 +121,8 @@ license = "MIT OR Apache-2.0"
[workspace.dependencies]
arrow = { version = "43.0.0" }
arrow-flight = { version = "43.0.0" }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "46182c894e5106adba7fb53e9848ce666fb6129b", default-features = false }
-datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev = "46182c894e5106adba7fb53e9848ce666fb6129b" }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "44008d71180f2d03e9d21944788e61cb8845abc7", default-features = false }
+datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev = "44008d71180f2d03e9d21944788e61cb8845abc7" }
hashbrown = { version = "0.14.0" }
object_store = { version = "0.6.0" }
diff --git a/influxdb_iox/tests/end_to_end_cases/flightsql.rs b/influxdb_iox/tests/end_to_end_cases/flightsql.rs
index 47988b5d5e..7553bbf0fe 100644
--- a/influxdb_iox/tests/end_to_end_cases/flightsql.rs
+++ b/influxdb_iox/tests/end_to_end_cases/flightsql.rs
@@ -1,8 +1,8 @@
-use std::{collections::HashMap, path::PathBuf, sync::Arc};
+use std::path::PathBuf;
use arrow::{
array::as_generic_binary_array,
- datatypes::{DataType, Fields, Schema, SchemaRef, TimeUnit},
+ datatypes::{DataType, Schema, TimeUnit},
record_batch::RecordBatch,
};
use arrow_flight::{
@@ -1592,10 +1592,7 @@ async fn assert_schema(client: &mut FlightClient, cmd: Any) {
let mut saw_data = false;
while let Some(batch) = result_stream.try_next().await.unwrap() {
saw_data = true;
- // strip metadata (GetFlightInfo doesn't include metadata for
- // some reason) before comparison
- // https://github.com/influxdata/influxdb_iox/issues/7282
- let batch_schema = strip_metadata(&batch.schema());
+ let batch_schema = batch.schema();
assert_eq!(
batch_schema.as_ref(),
&flight_info_schema,
@@ -1603,10 +1600,6 @@ async fn assert_schema(client: &mut FlightClient, cmd: Any) {
);
// The stream itself also may report a schema
if let Some(stream_schema) = result_stream.schema() {
- // strip metadata (GetFlightInfo doesn't include metadata for
- // some reason) before comparison
- // https://github.com/influxdata/influxdb_iox/issues/7282
- let stream_schema = strip_metadata(stream_schema);
assert_eq!(stream_schema.as_ref(), &flight_info_schema);
}
}
@@ -1615,16 +1608,6 @@ async fn assert_schema(client: &mut FlightClient, cmd: Any) {
assert!(saw_data);
}
-fn strip_metadata(schema: &Schema) -> SchemaRef {
- let stripped_fields: Fields = schema
- .fields()
- .iter()
- .map(|f| f.as_ref().clone().with_metadata(HashMap::new()))
- .collect();
-
- Arc::new(Schema::new(stripped_fields))
-}
-
#[tokio::test]
async fn authz() {
test_helpers::maybe_start_logging();
diff --git a/iox_query_influxql/src/aggregate/percentile.rs b/iox_query_influxql/src/aggregate/percentile.rs
index 4e7f37b318..5c09612f2f 100644
--- a/iox_query_influxql/src/aggregate/percentile.rs
+++ b/iox_query_influxql/src/aggregate/percentile.rs
@@ -39,7 +39,7 @@ pub(super) fn accumulator(dt: &DataType) -> Result<Box<dyn Accumulator>> {
/// Calculate the intermediate merge state for the aggregator.
pub(super) fn state_type(dt: &DataType) -> Result<Arc<Vec<DataType>>> {
Ok(Arc::new(vec![
- DataType::List(Arc::new(Field::new("state", dt.clone(), false))),
+ DataType::List(Arc::new(Field::new("item", dt.clone(), true))),
DataType::Float64,
]))
}
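The state_type change above swaps the list's child field from a non-nullable "state" to Arrow's conventional "item" name with nullability, which appears to be what the upgraded DataFusion expects for list-typed aggregate state. A minimal sketch of constructing that shape, assuming only the arrow crate (the helper name is hypothetical):

use std::sync::Arc;

use arrow::datatypes::{DataType, Field};

/// Hypothetical helper mirroring the hunk above: the list-typed portion of the
/// aggregate's intermediate state uses Arrow's conventional child field,
/// named "item" and nullable, rather than a non-nullable "state" field.
fn percentile_state_list(element_type: &DataType) -> DataType {
    DataType::List(Arc::new(Field::new("item", element_type.clone(), true)))
}
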
diff --git a/iox_query_influxql/src/plan/util_copy.rs b/iox_query_influxql/src/plan/util_copy.rs
index 563b6e2a93..b9e0dc6cb9 100644
--- a/iox_query_influxql/src/plan/util_copy.rs
+++ b/iox_query_influxql/src/plan/util_copy.rs
@@ -145,34 +145,27 @@ where
negated,
expr,
pattern,
+ case_insensitive,
escape_char,
}) => Ok(Expr::Like(Like::new(
*negated,
Box::new(clone_with_replacement(expr, replacement_fn)?),
Box::new(clone_with_replacement(pattern, replacement_fn)?),
*escape_char,
- ))),
- Expr::ILike(Like {
- negated,
- expr,
- pattern,
- escape_char,
- }) => Ok(Expr::ILike(Like::new(
- *negated,
- Box::new(clone_with_replacement(expr, replacement_fn)?),
- Box::new(clone_with_replacement(pattern, replacement_fn)?),
- *escape_char,
+ *case_insensitive,
))),
Expr::SimilarTo(Like {
negated,
expr,
pattern,
+ case_insensitive,
escape_char,
}) => Ok(Expr::SimilarTo(Like::new(
*negated,
Box::new(clone_with_replacement(expr, replacement_fn)?),
Box::new(clone_with_replacement(pattern, replacement_fn)?),
*escape_char,
+ *case_insensitive,
))),
Expr::Case(case) => Ok(Expr::Case(Case::new(
match &case.expr {
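The util_copy.rs hunk above captures the API migration this commit adapts to: the separate Expr::ILike variant is gone, and case-insensitive matching is now a case_insensitive flag on the unified Like expression (note the extra argument at the end of Like::new). A minimal sketch of building such an expression with the new constructor, assuming the datafusion_expr crate at the pinned revision; the column name, pattern, and function name are illustrative:

use datafusion_expr::{col, expr::Like, lit, Expr};

/// Illustrative only: construct what used to be `Expr::ILike` using the
/// unified `Like` variant with `case_insensitive` set to true.
fn ilike_example() -> Expr {
    Expr::Like(Like::new(
        false,                       // negated
        Box::new(col("greatness")),  // expression being matched
        Box::new(lit("unbounded%")), // pattern
        None,                        // escape_char
        true,                        // case_insensitive (replaces Expr::ILike)
    ))
}
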
diff --git a/iox_query_influxql/src/window/non_negative.rs b/iox_query_influxql/src/window/non_negative.rs
index 0af5ac6432..e5e876b282 100644
--- a/iox_query_influxql/src/window/non_negative.rs
+++ b/iox_query_influxql/src/window/non_negative.rs
@@ -21,17 +21,6 @@ struct NonNegative {
}
impl PartitionEvaluator for NonNegative {
- fn update_state(
- &mut self,
- state: &WindowAggState,
- idx: usize,
- range_columns: &[Arc<dyn Array>],
- sort_partition_points: &[Range<usize>],
- ) -> Result<()> {
- self.partition_evaluator
- .update_state(state, idx, range_columns, sort_partition_points)
- }
-
fn memoize(&mut self, state: &mut WindowAggState) -> Result<()> {
self.partition_evaluator.memoize(state)
}
diff --git a/predicate/src/lib.rs b/predicate/src/lib.rs
index aa6c509574..fe590f760b 100644
--- a/predicate/src/lib.rs
+++ b/predicate/src/lib.rs
@@ -496,7 +496,6 @@ impl TreeNodeVisitor for RowBasedVisitor {
| Expr::Column(_)
| Expr::Exists { .. }
| Expr::GetIndexedField { .. }
- | Expr::ILike { .. }
| Expr::InList { .. }
| Expr::InSubquery { .. }
| Expr::IsFalse(_)
diff --git a/predicate/src/rpc_predicate/rewrite.rs b/predicate/src/rpc_predicate/rewrite.rs
index 69cf116ec1..463732b7b5 100644
--- a/predicate/src/rpc_predicate/rewrite.rs
+++ b/predicate/src/rpc_predicate/rewrite.rs
@@ -515,6 +515,7 @@ mod tests {
expr,
pattern,
escape_char: None,
+ case_insensitive: false,
})
}
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index 0357ff1df4..5d2b5ae333 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -28,9 +28,9 @@ bytes = { version = "1" }
chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "serde"] }
crossbeam-utils = { version = "0.8" }
crypto-common = { version = "0.1", default-features = false, features = ["std"] }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "46182c894e5106adba7fb53e9848ce666fb6129b" }
-datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "46182c894e5106adba7fb53e9848ce666fb6129b", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
-datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "46182c894e5106adba7fb53e9848ce666fb6129b", default-features = false, features = ["crypto_expressions", "encoding_expressions", "regex_expressions", "unicode_expressions"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "44008d71180f2d03e9d21944788e61cb8845abc7" }
+datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "44008d71180f2d03e9d21944788e61cb8845abc7", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] }
+datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "44008d71180f2d03e9d21944788e61cb8845abc7", default-features = false, features = ["crypto_expressions", "encoding_expressions", "regex_expressions", "unicode_expressions"] }
digest = { version = "0.10", features = ["mac", "std"] }
either = { version = "1", features = ["serde"] }
fixedbitset = { version = "0.4" }
|
f780aba353abbf754b0821b04358057542ad2a80
|
Nga Tran
|
2023-03-21 14:57:10
|
set max_l0_created_at to reasonable values for the tests and al… (#7286)
|
* test: set max_l0_created_at to reasonable values for the tests and also verify it using both test layout and catalog function
* fix: typo
---------
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
test: set max_l0_created_at to reasonable values for the tests and al… (#7286)
* test: set max_l0_created_at to reasonable values for the tests and also verify it using both test layout and catalog function
* fix: typo
---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/compactor2/src/components/split_or_compact/split_compact.rs b/compactor2/src/components/split_or_compact/split_compact.rs
index cb096af1b7..21853191dc 100644
--- a/compactor2/src/components/split_or_compact/split_compact.rs
+++ b/compactor2/src/components/split_or_compact/split_compact.rs
@@ -45,7 +45,7 @@ impl SplitOrCompact for SplitCompact {
/// (2).If split is not needed which also means the split was needed and done in previous round,
/// pick files to compact that under max_compact_size limit. Mostly after the split above
/// done in previous round, we will be able to do this because start level and
- /// target level time ranges are aligned
+ /// target level time ranges are aligned
/// (3).If the smallest possible set to compact is still over size limit, split over-size files.
/// This will be any large files of start-level or target-level. We expect this split is very rare
/// and the goal is to reduce the size for us to move forward, hence the split time will make e
diff --git a/compactor2/tests/layouts/large_files.rs b/compactor2/tests/layouts/large_files.rs
index 0fa310f12d..9c19124625 100644
--- a/compactor2/tests/layouts/large_files.rs
+++ b/compactor2/tests/layouts/large_files.rs
@@ -3,6 +3,7 @@
//! See [crate::layout] module for detailed documentation
use data_types::CompactionLevel;
+use iox_time::Time;
use crate::layouts::{layout_setup_builder, parquet_builder, run_layout_scenario, ONE_MB};
@@ -111,6 +112,7 @@ async fn one_larger_max_compact_size() {
.with_min_time(1)
.with_max_time(1000)
.with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .with_max_l0_created_at(Time::from_timestamp_nanos(5))
// file > max_desired_file_size_bytes
.with_file_size_bytes((max_compact_size + 1) as u64),
)
@@ -123,14 +125,14 @@ async fn one_larger_max_compact_size() {
---
- "**** Input Files "
- "L1, all files 300mb "
- - "L1.1[1,1000] 1ns |------------------------------------------L1.1------------------------------------------|"
- - "WARNING: file L1.1[1,1000] 1ns 300mb exceeds soft limit 100mb by more than 50%"
+ - "L1.1[1,1000] 5ns |------------------------------------------L1.1------------------------------------------|"
+ - "WARNING: file L1.1[1,1000] 5ns 300mb exceeds soft limit 100mb by more than 50%"
- "Committing partition 1:"
- " Upgrading 1 files level to CompactionLevel::L2: L1.1"
- "**** Final Output Files "
- "L2, all files 300mb "
- - "L2.1[1,1000] 1ns |------------------------------------------L2.1------------------------------------------|"
- - "WARNING: file L2.1[1,1000] 1ns 300mb exceeds soft limit 100mb by more than 50%"
+ - "L2.1[1,1000] 5ns |------------------------------------------L2.1------------------------------------------|"
+ - "WARNING: file L2.1[1,1000] 5ns 300mb exceeds soft limit 100mb by more than 50%"
"###
);
}
@@ -155,6 +157,7 @@ async fn one_l0_larger_max_compact_size() {
.with_min_time(1)
.with_max_time(1000)
.with_compaction_level(CompactionLevel::Initial)
+ .with_max_l0_created_at(Time::from_timestamp_nanos(5))
// file > max_desired_file_size_bytes
.with_file_size_bytes((max_compact_size + 1) as u64),
)
@@ -167,16 +170,16 @@ async fn one_l0_larger_max_compact_size() {
---
- "**** Input Files "
- "L0, all files 300mb "
- - "L0.1[1,1000] 1ns |------------------------------------------L0.1------------------------------------------|"
- - "WARNING: file L0.1[1,1000] 1ns 300mb exceeds soft limit 100mb by more than 50%"
+ - "L0.1[1,1000] 5ns |------------------------------------------L0.1------------------------------------------|"
+ - "WARNING: file L0.1[1,1000] 5ns 300mb exceeds soft limit 100mb by more than 50%"
- "Committing partition 1:"
- " Upgrading 1 files level to CompactionLevel::L1: L0.1"
- "Committing partition 1:"
- " Upgrading 1 files level to CompactionLevel::L2: L1.1"
- "**** Final Output Files "
- "L2, all files 300mb "
- - "L2.1[1,1000] 1ns |------------------------------------------L2.1------------------------------------------|"
- - "WARNING: file L2.1[1,1000] 1ns 300mb exceeds soft limit 100mb by more than 50%"
+ - "L2.1[1,1000] 5ns |------------------------------------------L2.1------------------------------------------|"
+ - "WARNING: file L2.1[1,1000] 5ns 300mb exceeds soft limit 100mb by more than 50%"
"###
);
}
@@ -203,6 +206,8 @@ async fn two_large_files_total_under_max_compact_size() {
.with_min_time(i)
.with_max_time(1000)
.with_compaction_level(CompactionLevel::try_from(i as i32).unwrap())
+ // max_l0_created_at of larger level is set to be smaller
+ .with_max_l0_created_at(Time::from_timestamp_nanos(10 - i))
.with_file_size_bytes(size),
)
.await;
@@ -214,25 +219,25 @@ async fn two_large_files_total_under_max_compact_size() {
---
- "**** Input Files "
- "L1, all files 100mb "
- - "L1.1[1,1000] 1ns |------------------------------------------L1.1------------------------------------------|"
+ - "L1.1[1,1000] 9ns |------------------------------------------L1.1------------------------------------------|"
- "L2, all files 100mb "
- - "L2.2[2,1000] 1ns |-----------------------------------------L2.2------------------------------------------| "
+ - "L2.2[2,1000] 8ns |-----------------------------------------L2.2------------------------------------------| "
- "**** Simulation run 0, type=split(split_times=[501]). 2 Input Files, 200mb total:"
- "L1, all files 100mb "
- - "L1.1[1,1000] 1ns |------------------------------------------L1.1------------------------------------------|"
+ - "L1.1[1,1000] 9ns |------------------------------------------L1.1------------------------------------------|"
- "L2, all files 100mb "
- - "L2.2[2,1000] 1ns |-----------------------------------------L2.2------------------------------------------| "
+ - "L2.2[2,1000] 8ns |-----------------------------------------L2.2------------------------------------------| "
- "**** 2 Output Files (parquet_file_id not yet assigned), 200mb total:"
- "L2 "
- - "L2.?[1,501] 1ns 100.1mb |-------------------L2.?--------------------| "
- - "L2.?[502,1000] 1ns 99.9mb |-------------------L2.?-------------------| "
+ - "L2.?[1,501] 9ns 100.1mb |-------------------L2.?--------------------| "
+ - "L2.?[502,1000] 9ns 99.9mb |-------------------L2.?-------------------| "
- "Committing partition 1:"
- " Soft Deleting 2 files: L1.1, L2.2"
- " Creating 2 files"
- "**** Final Output Files "
- "L2 "
- - "L2.3[1,501] 1ns 100.1mb |-------------------L2.3--------------------| "
- - "L2.4[502,1000] 1ns 99.9mb |-------------------L2.4-------------------| "
+ - "L2.3[1,501] 9ns 100.1mb |-------------------L2.3--------------------| "
+ - "L2.4[502,1000] 9ns 99.9mb |-------------------L2.4-------------------| "
"###
);
}
@@ -260,6 +265,8 @@ async fn two_large_files_total_over_max_compact_size() {
.with_max_time(1000)
// L1.1 or L2.2
.with_compaction_level(CompactionLevel::try_from(i as i32).unwrap())
+ // max_l0_created_at of larger level is set to be smaller
+ .with_max_l0_created_at(Time::from_timestamp_nanos(10 - i))
.with_file_size_bytes(size as u64),
)
.await;
@@ -271,58 +278,58 @@ async fn two_large_files_total_over_max_compact_size() {
---
- "**** Input Files "
- "L1, all files 150mb "
- - "L1.1[1,1000] 1ns |------------------------------------------L1.1------------------------------------------|"
+ - "L1.1[1,1000] 9ns |------------------------------------------L1.1------------------------------------------|"
- "L2, all files 150mb "
- - "L2.2[2,1000] 1ns |-----------------------------------------L2.2------------------------------------------| "
- - "WARNING: file L1.1[1,1000] 1ns 150mb exceeds soft limit 100mb by more than 50%"
- - "WARNING: file L2.2[2,1000] 1ns 150mb exceeds soft limit 100mb by more than 50%"
+ - "L2.2[2,1000] 8ns |-----------------------------------------L2.2------------------------------------------| "
+ - "WARNING: file L1.1[1,1000] 9ns 150mb exceeds soft limit 100mb by more than 50%"
+ - "WARNING: file L2.2[2,1000] 8ns 150mb exceeds soft limit 100mb by more than 50%"
- "**** Simulation run 0, type=split(split_times=[667]). 1 Input Files, 150mb total:"
- "L1, all files 150mb "
- - "L1.1[1,1000] 1ns |------------------------------------------L1.1------------------------------------------|"
+ - "L1.1[1,1000] 9ns |------------------------------------------L1.1------------------------------------------|"
- "**** 2 Output Files (parquet_file_id not yet assigned), 150mb total:"
- "L1 "
- - "L1.?[1,667] 1ns 100mb |---------------------------L1.?---------------------------| "
- - "L1.?[668,1000] 1ns 50mb |-----------L1.?------------| "
+ - "L1.?[1,667] 9ns 100mb |---------------------------L1.?---------------------------| "
+ - "L1.?[668,1000] 9ns 50mb |-----------L1.?------------| "
- "**** Simulation run 1, type=split(split_times=[668]). 1 Input Files, 150mb total:"
- "L2, all files 150mb "
- - "L2.2[2,1000] 1ns |------------------------------------------L2.2------------------------------------------|"
+ - "L2.2[2,1000] 8ns |------------------------------------------L2.2------------------------------------------|"
- "**** 2 Output Files (parquet_file_id not yet assigned), 150mb total:"
- "L2 "
- - "L2.?[2,668] 1ns 100.1mb |---------------------------L2.?---------------------------| "
- - "L2.?[669,1000] 1ns 49.9mb |-----------L2.?------------| "
+ - "L2.?[2,668] 8ns 100.1mb |---------------------------L2.?---------------------------| "
+ - "L2.?[669,1000] 8ns 49.9mb |-----------L2.?------------| "
- "Committing partition 1:"
- " Soft Deleting 2 files: L1.1, L2.2"
- " Creating 4 files"
- "**** Simulation run 2, type=split(split_times=[668]). 1 Input Files, 50mb total:"
- "L1, all files 50mb "
- - "L1.4[668,1000] 1ns |------------------------------------------L1.4------------------------------------------|"
+ - "L1.4[668,1000] 9ns |------------------------------------------L1.4------------------------------------------|"
- "**** 2 Output Files (parquet_file_id not yet assigned), 50mb total:"
- "L1 "
- - "L1.?[668,668] 1ns 0b |L1.?| "
- - "L1.?[669,1000] 1ns 50mb |-----------------------------------------L1.?------------------------------------------| "
+ - "L1.?[668,668] 9ns 0b |L1.?| "
+ - "L1.?[669,1000] 9ns 50mb |-----------------------------------------L1.?------------------------------------------| "
- "Committing partition 1:"
- " Soft Deleting 1 files: L1.4"
- " Creating 2 files"
- "**** Simulation run 3, type=split(split_times=[335]). 3 Input Files, 200.1mb total:"
- "L1 "
- - "L1.3[1,667] 1ns 100mb |-----------------------------------------L1.3------------------------------------------| "
- - "L1.7[668,668] 1ns 0b |L1.7|"
+ - "L1.3[1,667] 9ns 100mb |-----------------------------------------L1.3------------------------------------------| "
+ - "L1.7[668,668] 9ns 0b |L1.7|"
- "L2 "
- - "L2.5[2,668] 1ns 100.1mb |-----------------------------------------L2.5------------------------------------------| "
+ - "L2.5[2,668] 8ns 100.1mb |-----------------------------------------L2.5------------------------------------------| "
- "**** 2 Output Files (parquet_file_id not yet assigned), 200.1mb total:"
- "L2 "
- - "L2.?[1,335] 1ns 100.2mb |-------------------L2.?--------------------| "
- - "L2.?[336,668] 1ns 99.9mb |-------------------L2.?-------------------| "
+ - "L2.?[1,335] 9ns 100.2mb |-------------------L2.?--------------------| "
+ - "L2.?[336,668] 9ns 99.9mb |-------------------L2.?-------------------| "
- "Committing partition 1:"
- " Soft Deleting 3 files: L1.3, L2.5, L1.7"
- " Creating 2 files"
- "**** Final Output Files "
- "L1 "
- - "L1.8[669,1000] 1ns 50mb |-----------L1.8------------| "
+ - "L1.8[669,1000] 9ns 50mb |-----------L1.8------------| "
- "L2 "
- - "L2.6[669,1000] 1ns 49.9mb |-----------L2.6------------| "
- - "L2.9[1,335] 1ns 100.2mb |------------L2.9------------| "
- - "L2.10[336,668] 1ns 99.9mb |-----------L2.10-----------| "
+ - "L2.6[669,1000] 8ns 49.9mb |-----------L2.6------------| "
+ - "L2.9[1,335] 9ns 100.2mb |------------L2.9------------| "
+ - "L2.10[336,668] 9ns 99.9mb |-----------L2.10-----------| "
"###
);
}
@@ -351,6 +358,8 @@ async fn two_large_files_total_over_max_compact_size_small_overlap_range() {
.with_max_time(1000)
// L1.1 or L2.2
.with_compaction_level(CompactionLevel::try_from(i as i32).unwrap())
+ // max_l0_created_at of larger level is set to be smaller
+ .with_max_l0_created_at(Time::from_timestamp_nanos(10 - i))
.with_file_size_bytes(size as u64),
)
.await;
@@ -362,47 +371,47 @@ async fn two_large_files_total_over_max_compact_size_small_overlap_range() {
---
- "**** Input Files "
- "L1, all files 150mb "
- - "L1.1[0,1000] 1ns |------------------------------------------L1.1------------------------------------------|"
+ - "L1.1[0,1000] 9ns |------------------------------------------L1.1------------------------------------------|"
- "L2, all files 150mb "
- - "L2.2[800,1000] 1ns |------L2.2------|"
- - "WARNING: file L1.1[0,1000] 1ns 150mb exceeds soft limit 100mb by more than 50%"
- - "WARNING: file L2.2[800,1000] 1ns 150mb exceeds soft limit 100mb by more than 50%"
+ - "L2.2[800,1000] 8ns |------L2.2------|"
+ - "WARNING: file L1.1[0,1000] 9ns 150mb exceeds soft limit 100mb by more than 50%"
+ - "WARNING: file L2.2[800,1000] 8ns 150mb exceeds soft limit 100mb by more than 50%"
- "**** Simulation run 0, type=split(split_times=[667]). 1 Input Files, 150mb total:"
- "L1, all files 150mb "
- - "L1.1[0,1000] 1ns |------------------------------------------L1.1------------------------------------------|"
+ - "L1.1[0,1000] 9ns |------------------------------------------L1.1------------------------------------------|"
- "**** 2 Output Files (parquet_file_id not yet assigned), 150mb total:"
- "L1 "
- - "L1.?[0,667] 1ns 100.05mb |---------------------------L1.?---------------------------| "
- - "L1.?[668,1000] 1ns 49.95mb |-----------L1.?------------| "
+ - "L1.?[0,667] 9ns 100.05mb |---------------------------L1.?---------------------------| "
+ - "L1.?[668,1000] 9ns 49.95mb |-----------L1.?------------| "
- "**** Simulation run 1, type=split(split_times=[934]). 1 Input Files, 150mb total:"
- "L2, all files 150mb "
- - "L2.2[800,1000] 1ns |------------------------------------------L2.2------------------------------------------|"
+ - "L2.2[800,1000] 8ns |------------------------------------------L2.2------------------------------------------|"
- "**** 2 Output Files (parquet_file_id not yet assigned), 150mb total:"
- "L2 "
- - "L2.?[800,934] 1ns 100.5mb|---------------------------L2.?---------------------------| "
- - "L2.?[935,1000] 1ns 49.5mb |-----------L2.?------------| "
+ - "L2.?[800,934] 8ns 100.5mb|---------------------------L2.?---------------------------| "
+ - "L2.?[935,1000] 8ns 49.5mb |-----------L2.?------------| "
- "Committing partition 1:"
- " Soft Deleting 2 files: L1.1, L2.2"
- " Creating 4 files"
- "**** Simulation run 2, type=split(split_times=[835]). 3 Input Files, 199.95mb total:"
- "L1 "
- - "L1.4[668,1000] 1ns 49.95mb|------------------------------------------L1.4------------------------------------------|"
+ - "L1.4[668,1000] 9ns 49.95mb|------------------------------------------L1.4------------------------------------------|"
- "L2 "
- - "L2.5[800,934] 1ns 100.5mb |---------------L2.5---------------| "
- - "L2.6[935,1000] 1ns 49.5mb |-----L2.6------| "
+ - "L2.5[800,934] 8ns 100.5mb |---------------L2.5---------------| "
+ - "L2.6[935,1000] 8ns 49.5mb |-----L2.6------| "
- "**** 2 Output Files (parquet_file_id not yet assigned), 199.95mb total:"
- "L2 "
- - "L2.?[668,835] 1ns 100.58mb|-------------------L2.?--------------------| "
- - "L2.?[836,1000] 1ns 99.37mb |-------------------L2.?-------------------| "
+ - "L2.?[668,835] 9ns 100.58mb|-------------------L2.?--------------------| "
+ - "L2.?[836,1000] 9ns 99.37mb |-------------------L2.?-------------------| "
- "Committing partition 1:"
- " Soft Deleting 3 files: L1.4, L2.5, L2.6"
- " Upgrading 1 files level to CompactionLevel::L2: L1.3"
- " Creating 2 files"
- "**** Final Output Files "
- "L2 "
- - "L2.3[0,667] 1ns 100.05mb |---------------------------L2.3---------------------------| "
- - "L2.7[668,835] 1ns 100.58mb |----L2.7-----| "
- - "L2.8[836,1000] 1ns 99.37mb |----L2.8----| "
+ - "L2.3[0,667] 9ns 100.05mb |---------------------------L2.3---------------------------| "
+ - "L2.7[668,835] 9ns 100.58mb |----L2.7-----| "
+ - "L2.8[836,1000] 9ns 99.37mb |----L2.8----| "
"###
);
}
@@ -432,6 +441,8 @@ async fn two_large_files_total_over_max_compact_size_small_overlap_range_2() {
.with_max_time((i + 1) * 1000)
// L1.1 or L2.2
.with_compaction_level(CompactionLevel::try_from(i as i32).unwrap())
+ // max_l0_created_at of larger level is set to be smaller
+ .with_max_l0_created_at(Time::from_timestamp_nanos(10 - i))
.with_file_size_bytes(size as u64),
)
.await;
@@ -443,48 +454,48 @@ async fn two_large_files_total_over_max_compact_size_small_overlap_range_2() {
---
- "**** Input Files "
- "L1, all files 150mb "
- - "L1.1[800,2000] 1ns |---------------------L1.1----------------------| "
+ - "L1.1[800,2000] 9ns |---------------------L1.1----------------------| "
- "L2, all files 150mb "
- - "L2.2[1600,3000] 1ns |-------------------------L2.2--------------------------| "
- - "WARNING: file L1.1[800,2000] 1ns 150mb exceeds soft limit 100mb by more than 50%"
- - "WARNING: file L2.2[1600,3000] 1ns 150mb exceeds soft limit 100mb by more than 50%"
+ - "L2.2[1600,3000] 8ns |-------------------------L2.2--------------------------| "
+ - "WARNING: file L1.1[800,2000] 9ns 150mb exceeds soft limit 100mb by more than 50%"
+ - "WARNING: file L2.2[1600,3000] 8ns 150mb exceeds soft limit 100mb by more than 50%"
- "**** Simulation run 0, type=split(split_times=[1600]). 1 Input Files, 150mb total:"
- "L1, all files 150mb "
- - "L1.1[800,2000] 1ns |------------------------------------------L1.1------------------------------------------|"
+ - "L1.1[800,2000] 9ns |------------------------------------------L1.1------------------------------------------|"
- "**** 2 Output Files (parquet_file_id not yet assigned), 150mb total:"
- "L1 "
- - "L1.?[800,1600] 1ns 100mb |---------------------------L1.?---------------------------| "
- - "L1.?[1601,2000] 1ns 50mb |-----------L1.?------------| "
+ - "L1.?[800,1600] 9ns 100mb |---------------------------L1.?---------------------------| "
+ - "L1.?[1601,2000] 9ns 50mb |-----------L1.?------------| "
- "**** Simulation run 1, type=split(split_times=[2534]). 1 Input Files, 150mb total:"
- "L2, all files 150mb "
- - "L2.2[1600,3000] 1ns |------------------------------------------L2.2------------------------------------------|"
+ - "L2.2[1600,3000] 8ns |------------------------------------------L2.2------------------------------------------|"
- "**** 2 Output Files (parquet_file_id not yet assigned), 150mb total:"
- "L2 "
- - "L2.?[1600,2534] 1ns 100.07mb|---------------------------L2.?---------------------------| "
- - "L2.?[2535,3000] 1ns 49.93mb |-----------L2.?------------| "
+ - "L2.?[1600,2534] 8ns 100.07mb|---------------------------L2.?---------------------------| "
+ - "L2.?[2535,3000] 8ns 49.93mb |-----------L2.?------------| "
- "Committing partition 1:"
- " Soft Deleting 2 files: L1.1, L2.2"
- " Creating 4 files"
- "**** Simulation run 2, type=split(split_times=[1494, 2188]). 3 Input Files, 250.07mb total:"
- "L1 "
- - "L1.4[1601,2000] 1ns 50mb |-------L1.4-------| "
- - "L1.3[800,1600] 1ns 100mb |-----------------L1.3------------------| "
+ - "L1.4[1601,2000] 9ns 50mb |-------L1.4-------| "
+ - "L1.3[800,1600] 9ns 100mb |-----------------L1.3------------------| "
- "L2 "
- - "L2.5[1600,2534] 1ns 100.07mb |---------------------L2.5---------------------| "
+ - "L2.5[1600,2534] 8ns 100.07mb |---------------------L2.5---------------------| "
- "**** 3 Output Files (parquet_file_id not yet assigned), 250.07mb total:"
- "L2 "
- - "L2.?[800,1494] 1ns 100.09mb|---------------L2.?---------------| "
- - "L2.?[1495,2188] 1ns 99.94mb |--------------L2.?---------------| "
- - "L2.?[2189,2534] 1ns 50.04mb |-----L2.?------| "
+ - "L2.?[800,1494] 9ns 100.09mb|---------------L2.?---------------| "
+ - "L2.?[1495,2188] 9ns 99.94mb |--------------L2.?---------------| "
+ - "L2.?[2189,2534] 9ns 50.04mb |-----L2.?------| "
- "Committing partition 1:"
- " Soft Deleting 3 files: L1.3, L1.4, L2.5"
- " Creating 3 files"
- "**** Final Output Files "
- "L2 "
- - "L2.6[2535,3000] 1ns 49.93mb |------L2.6-------| "
- - "L2.7[800,1494] 1ns 100.09mb|-----------L2.7-----------| "
- - "L2.8[1495,2188] 1ns 99.94mb |-----------L2.8-----------| "
- - "L2.9[2189,2534] 1ns 50.04mb |----L2.9----| "
+ - "L2.6[2535,3000] 8ns 49.93mb |------L2.6-------| "
+ - "L2.7[800,1494] 9ns 100.09mb|-----------L2.7-----------| "
+ - "L2.8[1495,2188] 9ns 99.94mb |-----------L2.8-----------| "
+ - "L2.9[2189,2534] 9ns 50.04mb |----L2.9----| "
"###
);
}
@@ -514,6 +525,8 @@ async fn two_large_files_total_over_max_compact_size_small_overlap_range_3() {
.with_max_time((i - 1) * 1000 + 300)
// L1.1 or L2.2
.with_compaction_level(CompactionLevel::try_from(i as i32).unwrap())
+ // max_l0_created_at of larger level is set to be smaller
+ .with_max_l0_created_at(Time::from_timestamp_nanos(10 - i))
.with_file_size_bytes(size as u64),
)
.await;
@@ -525,48 +538,48 @@ async fn two_large_files_total_over_max_compact_size_small_overlap_range_3() {
---
- "**** Input Files "
- "L1, all files 150mb "
- - "L1.1[0,300] 1ns |-------L1.1-------| "
+ - "L1.1[0,300] 9ns |-------L1.1-------| "
- "L2, all files 150mb "
- - "L2.2[200,1300] 1ns |-----------------------------------L2.2-----------------------------------| "
- - "WARNING: file L1.1[0,300] 1ns 150mb exceeds soft limit 100mb by more than 50%"
- - "WARNING: file L2.2[200,1300] 1ns 150mb exceeds soft limit 100mb by more than 50%"
+ - "L2.2[200,1300] 8ns |-----------------------------------L2.2-----------------------------------| "
+ - "WARNING: file L1.1[0,300] 9ns 150mb exceeds soft limit 100mb by more than 50%"
+ - "WARNING: file L2.2[200,1300] 8ns 150mb exceeds soft limit 100mb by more than 50%"
- "**** Simulation run 0, type=split(split_times=[200]). 1 Input Files, 150mb total:"
- "L1, all files 150mb "
- - "L1.1[0,300] 1ns |------------------------------------------L1.1------------------------------------------|"
+ - "L1.1[0,300] 9ns |------------------------------------------L1.1------------------------------------------|"
- "**** 2 Output Files (parquet_file_id not yet assigned), 150mb total:"
- "L1 "
- - "L1.?[0,200] 1ns 100mb |---------------------------L1.?---------------------------| "
- - "L1.?[201,300] 1ns 50mb |-----------L1.?------------| "
+ - "L1.?[0,200] 9ns 100mb |---------------------------L1.?---------------------------| "
+ - "L1.?[201,300] 9ns 50mb |-----------L1.?------------| "
- "**** Simulation run 1, type=split(split_times=[934]). 1 Input Files, 150mb total:"
- "L2, all files 150mb "
- - "L2.2[200,1300] 1ns |------------------------------------------L2.2------------------------------------------|"
+ - "L2.2[200,1300] 8ns |------------------------------------------L2.2------------------------------------------|"
- "**** 2 Output Files (parquet_file_id not yet assigned), 150mb total:"
- "L2 "
- - "L2.?[200,934] 1ns 100.09mb|---------------------------L2.?---------------------------| "
- - "L2.?[935,1300] 1ns 49.91mb |-----------L2.?------------| "
+ - "L2.?[200,934] 8ns 100.09mb|---------------------------L2.?---------------------------| "
+ - "L2.?[935,1300] 8ns 49.91mb |-----------L2.?------------| "
- "Committing partition 1:"
- " Soft Deleting 2 files: L1.1, L2.2"
- " Creating 4 files"
- "**** Simulation run 2, type=split(split_times=[374, 748]). 3 Input Files, 250.09mb total:"
- "L1 "
- - "L1.4[201,300] 1ns 50mb |-L1.4--| "
- - "L1.3[0,200] 1ns 100mb |------L1.3-------| "
+ - "L1.4[201,300] 9ns 50mb |-L1.4--| "
+ - "L1.3[0,200] 9ns 100mb |------L1.3-------| "
- "L2 "
- - "L2.5[200,934] 1ns 100.09mb |--------------------------------L2.5--------------------------------| "
+ - "L2.5[200,934] 8ns 100.09mb |--------------------------------L2.5--------------------------------| "
- "**** 3 Output Files (parquet_file_id not yet assigned), 250.09mb total:"
- "L2 "
- - "L2.?[0,374] 1ns 100.14mb |---------------L2.?---------------| "
- - "L2.?[375,748] 1ns 99.88mb |--------------L2.?---------------| "
- - "L2.?[749,934] 1ns 50.07mb |-----L2.?------| "
+ - "L2.?[0,374] 9ns 100.14mb |---------------L2.?---------------| "
+ - "L2.?[375,748] 9ns 99.88mb |--------------L2.?---------------| "
+ - "L2.?[749,934] 9ns 50.07mb |-----L2.?------| "
- "Committing partition 1:"
- " Soft Deleting 3 files: L1.3, L1.4, L2.5"
- " Creating 3 files"
- "**** Final Output Files "
- "L2 "
- - "L2.6[935,1300] 1ns 49.91mb |---------L2.6----------| "
- - "L2.7[0,374] 1ns 100.14mb |---------L2.7----------| "
- - "L2.8[375,748] 1ns 99.88mb |---------L2.8----------| "
- - "L2.9[749,934] 1ns 50.07mb |---L2.9---| "
+ - "L2.6[935,1300] 8ns 49.91mb |---------L2.6----------| "
+ - "L2.7[0,374] 9ns 100.14mb |---------L2.7----------| "
+ - "L2.8[375,748] 9ns 99.88mb |---------L2.8----------| "
+ - "L2.9[749,934] 9ns 50.07mb |---L2.9---| "
"###
);
}
@@ -593,6 +606,8 @@ async fn two_large_files_total_over_max_compact_size_start_l0() {
parquet_builder()
.with_min_time(i)
.with_max_time(1000)
+ // time of L0 larger than time of L1
+ .with_max_l0_created_at(Time::from_timestamp_nanos(10 - i))
// L0.1 or L1.2
.with_compaction_level(CompactionLevel::try_from(i as i32).unwrap())
.with_file_size_bytes(size as u64),
@@ -606,72 +621,88 @@ async fn two_large_files_total_over_max_compact_size_start_l0() {
---
- "**** Input Files "
- "L0, all files 150mb "
- - "L0.1[0,1000] 1ns |------------------------------------------L0.1------------------------------------------|"
+ - "L0.1[0,1000] 10ns |------------------------------------------L0.1------------------------------------------|"
- "L1, all files 150mb "
- - "L1.2[1,1000] 1ns |-----------------------------------------L1.2------------------------------------------| "
- - "WARNING: file L0.1[0,1000] 1ns 150mb exceeds soft limit 100mb by more than 50%"
- - "WARNING: file L1.2[1,1000] 1ns 150mb exceeds soft limit 100mb by more than 50%"
+ - "L1.2[1,1000] 9ns |-----------------------------------------L1.2------------------------------------------| "
+ - "WARNING: file L0.1[0,1000] 10ns 150mb exceeds soft limit 100mb by more than 50%"
+ - "WARNING: file L1.2[1,1000] 9ns 150mb exceeds soft limit 100mb by more than 50%"
- "**** Simulation run 0, type=split(split_times=[667]). 1 Input Files, 150mb total:"
- "L0, all files 150mb "
- - "L0.1[0,1000] 1ns |------------------------------------------L0.1------------------------------------------|"
+ - "L0.1[0,1000] 10ns |------------------------------------------L0.1------------------------------------------|"
- "**** 2 Output Files (parquet_file_id not yet assigned), 150mb total:"
- "L0 "
- - "L0.?[0,667] 1ns 100.05mb |---------------------------L0.?---------------------------| "
- - "L0.?[668,1000] 1ns 49.95mb |-----------L0.?------------| "
+ - "L0.?[0,667] 10ns 100.05mb|---------------------------L0.?---------------------------| "
+ - "L0.?[668,1000] 10ns 49.95mb |-----------L0.?------------| "
- "**** Simulation run 1, type=split(split_times=[667]). 1 Input Files, 150mb total:"
- "L1, all files 150mb "
- - "L1.2[1,1000] 1ns |------------------------------------------L1.2------------------------------------------|"
+ - "L1.2[1,1000] 9ns |------------------------------------------L1.2------------------------------------------|"
- "**** 2 Output Files (parquet_file_id not yet assigned), 150mb total:"
- "L1 "
- - "L1.?[1,667] 1ns 100mb |---------------------------L1.?---------------------------| "
- - "L1.?[668,1000] 1ns 50mb |-----------L1.?------------| "
+ - "L1.?[1,667] 9ns 100mb |---------------------------L1.?---------------------------| "
+ - "L1.?[668,1000] 9ns 50mb |-----------L1.?------------| "
- "Committing partition 1:"
- " Soft Deleting 2 files: L0.1, L1.2"
- " Creating 4 files"
- "**** Simulation run 2, type=split(split_times=[933]). 2 Input Files, 99.95mb total:"
- "L0 "
- - "L0.4[668,1000] 1ns 49.95mb|------------------------------------------L0.4------------------------------------------|"
+ - "L0.4[668,1000] 10ns 49.95mb|------------------------------------------L0.4------------------------------------------|"
- "L1 "
- - "L1.6[668,1000] 1ns 50mb |------------------------------------------L1.6------------------------------------------|"
+ - "L1.6[668,1000] 9ns 50mb |------------------------------------------L1.6------------------------------------------|"
- "**** 2 Output Files (parquet_file_id not yet assigned), 99.95mb total:"
- "L1 "
- - "L1.?[668,933] 1ns 79.78mb|--------------------------------L1.?---------------------------------| "
- - "L1.?[934,1000] 1ns 20.17mb |-----L1.?------| "
+ - "L1.?[668,933] 10ns 79.78mb|--------------------------------L1.?---------------------------------| "
+ - "L1.?[934,1000] 10ns 20.17mb |-----L1.?------| "
- "Committing partition 1:"
- " Soft Deleting 2 files: L0.4, L1.6"
- " Creating 2 files"
- "**** Simulation run 3, type=split(split_times=[334]). 2 Input Files, 200.05mb total:"
- "L0 "
- - "L0.3[0,667] 1ns 100.05mb |------------------------------------------L0.3------------------------------------------|"
+ - "L0.3[0,667] 10ns 100.05mb|------------------------------------------L0.3------------------------------------------|"
- "L1 "
- - "L1.5[1,667] 1ns 100mb |-----------------------------------------L1.5------------------------------------------| "
+ - "L1.5[1,667] 9ns 100mb |-----------------------------------------L1.5------------------------------------------| "
- "**** 2 Output Files (parquet_file_id not yet assigned), 200.05mb total:"
- "L1 "
- - "L1.?[0,334] 1ns 100.17mb |-------------------L1.?--------------------| "
- - "L1.?[335,667] 1ns 99.88mb |-------------------L1.?-------------------| "
+ - "L1.?[0,334] 10ns 100.17mb|-------------------L1.?--------------------| "
+ - "L1.?[335,667] 10ns 99.88mb |-------------------L1.?-------------------| "
- "Committing partition 1:"
- " Soft Deleting 2 files: L0.3, L1.5"
- " Creating 2 files"
- "**** Simulation run 4, type=split(split_times=[668]). 3 Input Files, 199.83mb total:"
- "L1 "
- - "L1.7[668,933] 1ns 79.78mb |--------------L1.7---------------| "
- - "L1.8[934,1000] 1ns 20.17mb |-L1.8-| "
- - "L1.10[335,667] 1ns 99.88mb|------------------L1.10-------------------| "
+ - "L1.7[668,933] 10ns 79.78mb |--------------L1.7---------------| "
+ - "L1.8[934,1000] 10ns 20.17mb |-L1.8-| "
+ - "L1.10[335,667] 10ns 99.88mb|------------------L1.10-------------------| "
- "**** 2 Output Files (parquet_file_id not yet assigned), 199.83mb total:"
- "L2 "
- - "L2.?[335,668] 1ns 100.06mb|-------------------L2.?--------------------| "
- - "L2.?[669,1000] 1ns 99.76mb |-------------------L2.?-------------------| "
+ - "L2.?[335,668] 10ns 100.06mb|-------------------L2.?--------------------| "
+ - "L2.?[669,1000] 10ns 99.76mb |-------------------L2.?-------------------| "
- "Committing partition 1:"
- " Soft Deleting 3 files: L1.7, L1.8, L1.10"
- " Upgrading 1 files level to CompactionLevel::L2: L1.9"
- " Creating 2 files"
- "**** Final Output Files "
- "L2 "
- - "L2.9[0,334] 1ns 100.17mb |------------L2.9------------| "
- - "L2.11[335,668] 1ns 100.06mb |-----------L2.11-----------| "
- - "L2.12[669,1000] 1ns 99.76mb |-----------L2.12-----------| "
+ - "L2.9[0,334] 10ns 100.17mb|------------L2.9------------| "
+ - "L2.11[335,668] 10ns 100.06mb |-----------L2.11-----------| "
+ - "L2.12[669,1000] 10ns 99.76mb |-----------L2.12-----------| "
"###
);
+
+ // Read all 12 files including the soft deleted ones
+ let output_files = setup.list_by_table().await;
+ assert_eq!(output_files.len(), 12);
+
+ // Sort the files by id
+ let mut output_files = output_files;
+ output_files.sort_by(|a, b| a.id.cmp(&b.id));
+
+ // Verify all L0 files created by splitting must have a max_l0_created_at of 10, which is the value of the original L0
+ // Note: this test makes created_test deterministic and 1, which we do not care much about
+ for file in &output_files {
+ if file.compaction_level == CompactionLevel::Initial {
+ assert_eq!(file.max_l0_created_at.get(), 10);
+ }
+ }
}
// Real-life case with three good size L1s and one very large L2
@@ -701,6 +732,8 @@ async fn target_too_large_1() {
.with_min_time(1)
.with_max_time(1000)
.with_compaction_level(CompactionLevel::Final)
+ // level-2 file has small max_l0_created_at, 5
+ .with_max_l0_created_at(Time::from_timestamp_nanos(5))
.with_file_size_bytes(l2_size),
)
.await;
@@ -714,6 +747,9 @@ async fn target_too_large_1() {
.with_min_time(i * 1000 + 1)
.with_max_time(i * 1000 + 1000)
.with_compaction_level(CompactionLevel::FileNonOverlapped)
+ // level-1 files, each has different max_l0_created_at and >= 10
+ // simulate L1s where the larger time range has the smaller max_l0_created_at, which is a rare use case
+ .with_max_l0_created_at(Time::from_timestamp_nanos(10 + (10 - i)))
.with_file_size_bytes(l1_sizes[i as usize]),
)
.await;
@@ -725,59 +761,59 @@ async fn target_too_large_1() {
---
- "**** Input Files "
- "L1 "
- - "L1.2[1,1000] 1ns 53mb |-----------L1.2------------| "
- - "L1.3[1001,2000] 1ns 45mb |-----------L1.3------------| "
- - "L1.4[2001,3000] 1ns 5mb |-----------L1.4------------| "
+ - "L1.2[1,1000] 20ns 53mb |-----------L1.2------------| "
+ - "L1.3[1001,2000] 19ns 45mb |-----------L1.3------------| "
+ - "L1.4[2001,3000] 18ns 5mb |-----------L1.4------------| "
- "L2 "
- - "L2.1[1,1000] 1ns 253mb |-----------L2.1------------| "
- - "WARNING: file L2.1[1,1000] 1ns 253mb exceeds soft limit 100mb by more than 50%"
+ - "L2.1[1,1000] 5ns 253mb |-----------L2.1------------| "
+ - "WARNING: file L2.1[1,1000] 5ns 253mb exceeds soft limit 100mb by more than 50%"
- "**** Simulation run 0, type=split(split_times=[396, 791]). 1 Input Files, 253mb total:"
- "L2, all files 253mb "
- - "L2.1[1,1000] 1ns |------------------------------------------L2.1------------------------------------------|"
+ - "L2.1[1,1000] 5ns |------------------------------------------L2.1------------------------------------------|"
- "**** 3 Output Files (parquet_file_id not yet assigned), 253mb total:"
- "L2 "
- - "L2.?[1,396] 1ns 100.04mb |--------------L2.?---------------| "
- - "L2.?[397,791] 1ns 99.78mb |--------------L2.?---------------| "
- - "L2.?[792,1000] 1ns 53.18mb |------L2.?------| "
+ - "L2.?[1,396] 5ns 100.04mb |--------------L2.?---------------| "
+ - "L2.?[397,791] 5ns 99.78mb |--------------L2.?---------------| "
+ - "L2.?[792,1000] 5ns 53.18mb |------L2.?------| "
- "Committing partition 1:"
- " Soft Deleting 1 files: L2.1"
- " Creating 3 files"
- "**** Simulation run 1, type=split(split_times=[396, 791]). 1 Input Files, 53mb total:"
- "L1, all files 53mb "
- - "L1.2[1,1000] 1ns |------------------------------------------L1.2------------------------------------------|"
+ - "L1.2[1,1000] 20ns |------------------------------------------L1.2------------------------------------------|"
- "**** 3 Output Files (parquet_file_id not yet assigned), 53mb total:"
- "L1 "
- - "L1.?[1,396] 1ns 20.96mb |--------------L1.?---------------| "
- - "L1.?[397,791] 1ns 20.9mb |--------------L1.?---------------| "
- - "L1.?[792,1000] 1ns 11.14mb |------L1.?------| "
+ - "L1.?[1,396] 20ns 20.96mb |--------------L1.?---------------| "
+ - "L1.?[397,791] 20ns 20.9mb |--------------L1.?---------------| "
+ - "L1.?[792,1000] 20ns 11.14mb |------L1.?------| "
- "Committing partition 1:"
- " Soft Deleting 1 files: L1.2"
- " Creating 3 files"
- "**** Simulation run 2, type=split(split_times=[328, 655]). 4 Input Files, 241.68mb total:"
- "L1 "
- - "L1.8[1,396] 1ns 20.96mb |-------------------L1.8-------------------| "
- - "L1.9[397,791] 1ns 20.9mb |-------------------L1.9-------------------| "
+ - "L1.8[1,396] 20ns 20.96mb |-------------------L1.8-------------------| "
+ - "L1.9[397,791] 20ns 20.9mb |-------------------L1.9-------------------| "
- "L2 "
- - "L2.5[1,396] 1ns 100.04mb |-------------------L2.5-------------------| "
- - "L2.6[397,791] 1ns 99.78mb |-------------------L2.6-------------------| "
+ - "L2.5[1,396] 5ns 100.04mb |-------------------L2.5-------------------| "
+ - "L2.6[397,791] 5ns 99.78mb |-------------------L2.6-------------------| "
- "**** 3 Output Files (parquet_file_id not yet assigned), 241.68mb total:"
- "L2 "
- - "L2.?[1,328] 1ns 100.04mb |---------------L2.?----------------| "
- - "L2.?[329,655] 1ns 99.73mb |---------------L2.?----------------| "
- - "L2.?[656,791] 1ns 41.91mb |----L2.?-----| "
+ - "L2.?[1,328] 20ns 100.04mb|---------------L2.?----------------| "
+ - "L2.?[329,655] 20ns 99.73mb |---------------L2.?----------------| "
+ - "L2.?[656,791] 20ns 41.91mb |----L2.?-----| "
- "Committing partition 1:"
- " Soft Deleting 4 files: L2.5, L2.6, L1.8, L1.9"
- " Creating 3 files"
- "**** Final Output Files "
- "L1 "
- - "L1.3[1001,2000] 1ns 45mb |-----------L1.3------------| "
- - "L1.4[2001,3000] 1ns 5mb |-----------L1.4------------| "
- - "L1.10[792,1000] 1ns 11.14mb |L1.10| "
+ - "L1.3[1001,2000] 19ns 45mb |-----------L1.3------------| "
+ - "L1.4[2001,3000] 18ns 5mb |-----------L1.4------------| "
+ - "L1.10[792,1000] 20ns 11.14mb |L1.10| "
- "L2 "
- - "L2.7[792,1000] 1ns 53.18mb |L2.7| "
- - "L2.11[1,328] 1ns 100.04mb|-L2.11-| "
- - "L2.12[329,655] 1ns 99.73mb |-L2.12-| "
- - "L2.13[656,791] 1ns 41.91mb |L2.13| "
+ - "L2.7[792,1000] 5ns 53.18mb |L2.7| "
+ - "L2.11[1,328] 20ns 100.04mb|-L2.11-| "
+ - "L2.12[329,655] 20ns 99.73mb |-L2.12-| "
+ - "L2.13[656,791] 20ns 41.91mb |L2.13| "
"###
);
}
@@ -809,6 +845,8 @@ async fn target_too_large_2() {
.with_min_time(1)
.with_max_time(3000)
.with_compaction_level(CompactionLevel::Final)
+ // level-2 file has small max_l0_created_at
+ .with_max_l0_created_at(Time::from_timestamp_nanos(5))
.with_file_size_bytes(l2_size),
)
.await;
@@ -822,6 +860,9 @@ async fn target_too_large_2() {
.with_min_time(i * 1000 + 1)
.with_max_time(i * 1000 + 1000)
.with_compaction_level(CompactionLevel::FileNonOverlapped)
+ // level-1 files each have a different max_l0_created_at, larger than the level-2 one
+ // set the smaller time range with the smaller max_l0_created_at, which is the common use case
+ .with_max_l0_created_at(Time::from_timestamp_nanos(10 + i))
.with_file_size_bytes(l1_sizes[i as usize]),
)
.await;
@@ -833,53 +874,53 @@ async fn target_too_large_2() {
---
- "**** Input Files "
- "L1 "
- - "L1.2[1,1000] 1ns 69mb |-----------L1.2------------| "
- - "L1.3[1001,2000] 1ns 50mb |-----------L1.3------------| "
+ - "L1.2[1,1000] 10ns 69mb |-----------L1.2------------| "
+ - "L1.3[1001,2000] 11ns 50mb |-----------L1.3------------| "
- "L2 "
- - "L2.1[1,3000] 1ns 232mb |------------------------------------------L2.1------------------------------------------|"
- - "WARNING: file L2.1[1,3000] 1ns 232mb exceeds soft limit 100mb by more than 50%"
+ - "L2.1[1,3000] 5ns 232mb |------------------------------------------L2.1------------------------------------------|"
+ - "WARNING: file L2.1[1,3000] 5ns 232mb exceeds soft limit 100mb by more than 50%"
- "**** Simulation run 0, type=split(split_times=[1294, 2587]). 1 Input Files, 232mb total:"
- "L2, all files 232mb "
- - "L2.1[1,3000] 1ns |------------------------------------------L2.1------------------------------------------|"
+ - "L2.1[1,3000] 5ns |------------------------------------------L2.1------------------------------------------|"
- "**** 3 Output Files (parquet_file_id not yet assigned), 232mb total:"
- "L2 "
- - "L2.?[1,1294] 1ns 100.03mb|----------------L2.?----------------| "
- - "L2.?[1295,2587] 1ns 99.95mb |----------------L2.?----------------| "
- - "L2.?[2588,3000] 1ns 32.03mb |---L2.?---| "
+ - "L2.?[1,1294] 5ns 100.03mb|----------------L2.?----------------| "
+ - "L2.?[1295,2587] 5ns 99.95mb |----------------L2.?----------------| "
+ - "L2.?[2588,3000] 5ns 32.03mb |---L2.?---| "
- "Committing partition 1:"
- " Soft Deleting 1 files: L2.1"
- " Creating 3 files"
- "**** Simulation run 1, type=split(split_times=[1294]). 1 Input Files, 50mb total:"
- "L1, all files 50mb "
- - "L1.3[1001,2000] 1ns |------------------------------------------L1.3------------------------------------------|"
+ - "L1.3[1001,2000] 11ns |------------------------------------------L1.3------------------------------------------|"
- "**** 2 Output Files (parquet_file_id not yet assigned), 50mb total:"
- "L1 "
- - "L1.?[1001,1294] 1ns 14.66mb|----------L1.?----------| "
- - "L1.?[1295,2000] 1ns 35.34mb |----------------------------L1.?-----------------------------| "
+ - "L1.?[1001,1294] 11ns 14.66mb|----------L1.?----------| "
+ - "L1.?[1295,2000] 11ns 35.34mb |----------------------------L1.?-----------------------------| "
- "Committing partition 1:"
- " Soft Deleting 1 files: L1.3"
- " Creating 2 files"
- "**** Simulation run 2, type=split(split_times=[705]). 3 Input Files, 183.69mb total:"
- "L1 "
- - "L1.2[1,1000] 1ns 69mb |-------------------------------L1.2--------------------------------| "
- - "L1.7[1001,1294] 1ns 14.66mb |-------L1.7-------| "
+ - "L1.2[1,1000] 10ns 69mb |-------------------------------L1.2--------------------------------| "
+ - "L1.7[1001,1294] 11ns 14.66mb |-------L1.7-------| "
- "L2 "
- - "L2.4[1,1294] 1ns 100.03mb|------------------------------------------L2.4------------------------------------------|"
+ - "L2.4[1,1294] 5ns 100.03mb|------------------------------------------L2.4------------------------------------------|"
- "**** 2 Output Files (parquet_file_id not yet assigned), 183.69mb total:"
- "L2 "
- - "L2.?[1,705] 1ns 100.01mb |---------------------L2.?----------------------| "
- - "L2.?[706,1294] 1ns 83.68mb |-----------------L2.?-----------------| "
+ - "L2.?[1,705] 11ns 100.01mb|---------------------L2.?----------------------| "
+ - "L2.?[706,1294] 11ns 83.68mb |-----------------L2.?-----------------| "
- "Committing partition 1:"
- " Soft Deleting 3 files: L1.2, L2.4, L1.7"
- " Creating 2 files"
- "**** Final Output Files "
- "L1 "
- - "L1.8[1295,2000] 1ns 35.34mb |-------L1.8--------| "
+ - "L1.8[1295,2000] 11ns 35.34mb |-------L1.8--------| "
- "L2 "
- - "L2.5[1295,2587] 1ns 99.95mb |----------------L2.5----------------| "
- - "L2.6[2588,3000] 1ns 32.03mb |---L2.6---| "
- - "L2.9[1,705] 1ns 100.01mb |-------L2.9--------| "
- - "L2.10[706,1294] 1ns 83.68mb |-----L2.10-----| "
+ - "L2.5[1295,2587] 5ns 99.95mb |----------------L2.5----------------| "
+ - "L2.6[2588,3000] 5ns 32.03mb |---L2.6---| "
+ - "L2.9[1,705] 11ns 100.01mb|-------L2.9--------| "
+ - "L2.10[706,1294] 11ns 83.68mb |-----L2.10-----| "
"###
);
}
@@ -912,6 +953,8 @@ async fn start_too_large_similar_time_range() {
.with_max_time(1000)
// L1.1 or L2.2
.with_compaction_level(CompactionLevel::try_from(i as i32).unwrap())
+ // max_l0_created_at of larger level is set to be smaller
+ .with_max_l0_created_at(Time::from_timestamp_nanos(10 - i))
.with_file_size_bytes(sizes[(i - 1) as usize]),
)
.await;
@@ -923,42 +966,42 @@ async fn start_too_large_similar_time_range() {
---
- "**** Input Files "
- "L1 "
- - "L1.1[1,1000] 1ns 250mb |------------------------------------------L1.1------------------------------------------|"
+ - "L1.1[1,1000] 9ns 250mb |------------------------------------------L1.1------------------------------------------|"
- "L2 "
- - "L2.2[2,1000] 1ns 52mb |-----------------------------------------L2.2------------------------------------------| "
- - "WARNING: file L1.1[1,1000] 1ns 250mb exceeds soft limit 100mb by more than 50%"
+ - "L2.2[2,1000] 8ns 52mb |-----------------------------------------L2.2------------------------------------------| "
+ - "WARNING: file L1.1[1,1000] 9ns 250mb exceeds soft limit 100mb by more than 50%"
- "**** Simulation run 0, type=split(split_times=[401, 801]). 1 Input Files, 250mb total:"
- "L1, all files 250mb "
- - "L1.1[1,1000] 1ns |------------------------------------------L1.1------------------------------------------|"
+ - "L1.1[1,1000] 9ns |------------------------------------------L1.1------------------------------------------|"
- "**** 3 Output Files (parquet_file_id not yet assigned), 250mb total:"
- "L1 "
- - "L1.?[1,401] 1ns 100.1mb |---------------L1.?---------------| "
- - "L1.?[402,801] 1ns 99.85mb |--------------L1.?---------------| "
- - "L1.?[802,1000] 1ns 50.05mb |-----L1.?------| "
+ - "L1.?[1,401] 9ns 100.1mb |---------------L1.?---------------| "
+ - "L1.?[402,801] 9ns 99.85mb |--------------L1.?---------------| "
+ - "L1.?[802,1000] 9ns 50.05mb |-----L1.?------| "
- "Committing partition 1:"
- " Soft Deleting 1 files: L1.1"
- " Creating 3 files"
- "**** Simulation run 1, type=split(split_times=[398, 795]). 3 Input Files, 251.95mb total:"
- "L1 "
- - "L1.3[1,401] 1ns 100.1mb |---------------L1.3---------------| "
- - "L1.4[402,801] 1ns 99.85mb |--------------L1.4---------------| "
+ - "L1.3[1,401] 9ns 100.1mb |---------------L1.3---------------| "
+ - "L1.4[402,801] 9ns 99.85mb |--------------L1.4---------------| "
- "L2 "
- - "L2.2[2,1000] 1ns 52mb |-----------------------------------------L2.2------------------------------------------| "
+ - "L2.2[2,1000] 8ns 52mb |-----------------------------------------L2.2------------------------------------------| "
- "**** 3 Output Files (parquet_file_id not yet assigned), 251.95mb total:"
- "L2 "
- - "L2.?[1,398] 1ns 100.12mb |--------------L2.?---------------| "
- - "L2.?[399,795] 1ns 99.87mb |--------------L2.?---------------| "
- - "L2.?[796,1000] 1ns 51.95mb |------L2.?------| "
+ - "L2.?[1,398] 9ns 100.12mb |--------------L2.?---------------| "
+ - "L2.?[399,795] 9ns 99.87mb |--------------L2.?---------------| "
+ - "L2.?[796,1000] 9ns 51.95mb |------L2.?------| "
- "Committing partition 1:"
- " Soft Deleting 3 files: L2.2, L1.3, L1.4"
- " Creating 3 files"
- "**** Final Output Files "
- "L1 "
- - "L1.5[802,1000] 1ns 50.05mb |-----L1.5------| "
+ - "L1.5[802,1000] 9ns 50.05mb |-----L1.5------| "
- "L2 "
- - "L2.6[1,398] 1ns 100.12mb |--------------L2.6---------------| "
- - "L2.7[399,795] 1ns 99.87mb |--------------L2.7---------------| "
- - "L2.8[796,1000] 1ns 51.95mb |------L2.8------| "
+ - "L2.6[1,398] 9ns 100.12mb |--------------L2.6---------------| "
+ - "L2.7[399,795] 9ns 99.87mb |--------------L2.7---------------| "
+ - "L2.8[796,1000] 9ns 51.95mb |------L2.8------| "
"###
);
}
@@ -992,6 +1035,8 @@ async fn start_too_large_small_time_range() {
.with_max_time(1000)
// L1.1 or L2.2
.with_compaction_level(CompactionLevel::try_from(i as i32).unwrap())
+ // max_l0_created_at of larger level is set to be smaller
+ .with_max_l0_created_at(Time::from_timestamp_nanos(10 - i))
.with_file_size_bytes(sizes[(i - 1) as usize]),
)
.await;
@@ -1003,42 +1048,42 @@ async fn start_too_large_small_time_range() {
---
- "**** Input Files "
- "L1 "
- - "L1.1[0,1000] 1ns 250mb |------------------------------------------L1.1------------------------------------------|"
+ - "L1.1[0,1000] 9ns 250mb |------------------------------------------L1.1------------------------------------------|"
- "L2 "
- - "L2.2[800,1000] 1ns 52mb |------L2.2------|"
- - "WARNING: file L1.1[0,1000] 1ns 250mb exceeds soft limit 100mb by more than 50%"
+ - "L2.2[800,1000] 8ns 52mb |------L2.2------|"
+ - "WARNING: file L1.1[0,1000] 9ns 250mb exceeds soft limit 100mb by more than 50%"
- "**** Simulation run 0, type=split(split_times=[400, 800]). 1 Input Files, 250mb total:"
- "L1, all files 250mb "
- - "L1.1[0,1000] 1ns |------------------------------------------L1.1------------------------------------------|"
+ - "L1.1[0,1000] 9ns |------------------------------------------L1.1------------------------------------------|"
- "**** 3 Output Files (parquet_file_id not yet assigned), 250mb total:"
- "L1 "
- - "L1.?[0,400] 1ns 100mb |---------------L1.?---------------| "
- - "L1.?[401,800] 1ns 99.75mb |--------------L1.?---------------| "
- - "L1.?[801,1000] 1ns 50.25mb |-----L1.?------| "
+ - "L1.?[0,400] 9ns 100mb |---------------L1.?---------------| "
+ - "L1.?[401,800] 9ns 99.75mb |--------------L1.?---------------| "
+ - "L1.?[801,1000] 9ns 50.25mb |-----L1.?------| "
- "Committing partition 1:"
- " Soft Deleting 1 files: L1.1"
- " Creating 3 files"
- "**** Simulation run 1, type=split(split_times=[698, 995]). 3 Input Files, 202mb total:"
- "L1 "
- - "L1.5[801,1000] 1ns 50.25mb |-----------L1.5------------| "
- - "L1.4[401,800] 1ns 99.75mb|--------------------------L1.4---------------------------| "
+ - "L1.5[801,1000] 9ns 50.25mb |-----------L1.5------------| "
+ - "L1.4[401,800] 9ns 99.75mb|--------------------------L1.4---------------------------| "
- "L2 "
- - "L2.2[800,1000] 1ns 52mb |------------L2.2------------| "
+ - "L2.2[800,1000] 8ns 52mb |------------L2.2------------| "
- "**** 3 Output Files (parquet_file_id not yet assigned), 202mb total:"
- "L2 "
- - "L2.?[401,698] 1ns 100.16mb|-------------------L2.?-------------------| "
- - "L2.?[699,995] 1ns 99.82mb |-------------------L2.?-------------------| "
- - "L2.?[996,1000] 1ns 2.02mb |L2.?|"
+ - "L2.?[401,698] 9ns 100.16mb|-------------------L2.?-------------------| "
+ - "L2.?[699,995] 9ns 99.82mb |-------------------L2.?-------------------| "
+ - "L2.?[996,1000] 9ns 2.02mb |L2.?|"
- "Committing partition 1:"
- " Soft Deleting 3 files: L2.2, L1.4, L1.5"
- " Upgrading 1 files level to CompactionLevel::L2: L1.3"
- " Creating 3 files"
- "**** Final Output Files "
- "L2 "
- - "L2.3[0,400] 1ns 100mb |---------------L2.3---------------| "
- - "L2.6[401,698] 1ns 100.16mb |----------L2.6----------| "
- - "L2.7[699,995] 1ns 99.82mb |----------L2.7----------| "
- - "L2.8[996,1000] 1ns 2.02mb |L2.8|"
+ - "L2.3[0,400] 9ns 100mb |---------------L2.3---------------| "
+ - "L2.6[401,698] 9ns 100.16mb |----------L2.6----------| "
+ - "L2.7[699,995] 9ns 99.82mb |----------L2.7----------| "
+ - "L2.8[996,1000] 9ns 2.02mb |L2.8|"
"###
);
}
@@ -1153,6 +1198,8 @@ async fn start_too_large_small_time_range_3() {
.with_max_time((i - 1) * 1000 + 300)
// L1.1 or L2.2
.with_compaction_level(CompactionLevel::try_from(i as i32).unwrap())
+ // max_l0_created_at of larger level is set to be smaller
+ .with_max_l0_created_at(Time::from_timestamp_nanos(10 - i))
.with_file_size_bytes(sizes[(i - 1) as usize]),
)
.await;
@@ -1164,42 +1211,42 @@ async fn start_too_large_small_time_range_3() {
---
- "**** Input Files "
- "L1 "
- - "L1.1[0,300] 1ns 250mb |-------L1.1-------| "
+ - "L1.1[0,300] 9ns 250mb |-------L1.1-------| "
- "L2 "
- - "L2.2[200,1300] 1ns 52mb |-----------------------------------L2.2-----------------------------------| "
- - "WARNING: file L1.1[0,300] 1ns 250mb exceeds soft limit 100mb by more than 50%"
+ - "L2.2[200,1300] 8ns 52mb |-----------------------------------L2.2-----------------------------------| "
+ - "WARNING: file L1.1[0,300] 9ns 250mb exceeds soft limit 100mb by more than 50%"
- "**** Simulation run 0, type=split(split_times=[120, 240]). 1 Input Files, 250mb total:"
- "L1, all files 250mb "
- - "L1.1[0,300] 1ns |------------------------------------------L1.1------------------------------------------|"
+ - "L1.1[0,300] 9ns |------------------------------------------L1.1------------------------------------------|"
- "**** 3 Output Files (parquet_file_id not yet assigned), 250mb total:"
- "L1 "
- - "L1.?[0,120] 1ns 100mb |---------------L1.?---------------| "
- - "L1.?[121,240] 1ns 99.17mb |--------------L1.?---------------| "
- - "L1.?[241,300] 1ns 50.83mb |-----L1.?------| "
+ - "L1.?[0,120] 9ns 100mb |---------------L1.?---------------| "
+ - "L1.?[121,240] 9ns 99.17mb |--------------L1.?---------------| "
+ - "L1.?[241,300] 9ns 50.83mb |-----L1.?------| "
- "Committing partition 1:"
- " Soft Deleting 1 files: L1.1"
- " Creating 3 files"
- "**** Simulation run 1, type=split(split_times=[705, 1289]). 3 Input Files, 202mb total:"
- "L1 "
- - "L1.5[241,300] 1ns 50.83mb |L1.5| "
- - "L1.4[121,240] 1ns 99.17mb|-L1.4--| "
+ - "L1.5[241,300] 9ns 50.83mb |L1.5| "
+ - "L1.4[121,240] 9ns 99.17mb|-L1.4--| "
- "L2 "
- - "L2.2[200,1300] 1ns 52mb |--------------------------------------L2.2---------------------------------------| "
+ - "L2.2[200,1300] 8ns 52mb |--------------------------------------L2.2---------------------------------------| "
- "**** 3 Output Files (parquet_file_id not yet assigned), 202mb total:"
- "L2 "
- - "L2.?[121,705] 1ns 100.06mb|-------------------L2.?-------------------| "
- - "L2.?[706,1289] 1ns 99.89mb |-------------------L2.?-------------------| "
- - "L2.?[1290,1300] 1ns 2.06mb |L2.?|"
+ - "L2.?[121,705] 9ns 100.06mb|-------------------L2.?-------------------| "
+ - "L2.?[706,1289] 9ns 99.89mb |-------------------L2.?-------------------| "
+ - "L2.?[1290,1300] 9ns 2.06mb |L2.?|"
- "Committing partition 1:"
- " Soft Deleting 3 files: L2.2, L1.4, L1.5"
- " Upgrading 1 files level to CompactionLevel::L2: L1.3"
- " Creating 3 files"
- "**** Final Output Files "
- "L2 "
- - "L2.3[0,120] 1ns 100mb |-L2.3-| "
- - "L2.6[121,705] 1ns 100.06mb |-----------------L2.6-----------------| "
- - "L2.7[706,1289] 1ns 99.89mb |-----------------L2.7-----------------| "
- - "L2.8[1290,1300] 1ns 2.06mb |L2.8|"
+ - "L2.3[0,120] 9ns 100mb |-L2.3-| "
+ - "L2.6[121,705] 9ns 100.06mb |-----------------L2.6-----------------| "
+ - "L2.7[706,1289] 9ns 99.89mb |-----------------L2.7-----------------| "
+ - "L2.8[1290,1300] 9ns 2.06mb |L2.8|"
"###
);
}
@@ -1232,6 +1279,8 @@ async fn tiny_time_range() {
.with_min_time(1)
.with_max_time(2)
.with_compaction_level(CompactionLevel::FileNonOverlapped)
+ // L1 file with larger max_l0_created_at
+ .with_max_l0_created_at(Time::from_timestamp_nanos(10))
.with_file_size_bytes(l1_size),
)
.await;
@@ -1244,6 +1293,8 @@ async fn tiny_time_range() {
.with_min_time(1)
.with_max_time(1000)
.with_compaction_level(CompactionLevel::Final)
+ // L2 file with smaller max_l0_created_at
+ .with_max_l0_created_at(Time::from_timestamp_nanos(5))
.with_file_size_bytes(l2_size),
)
.await;
@@ -1255,17 +1306,17 @@ async fn tiny_time_range() {
---
- "**** Input Files "
- "L1 "
- - "L1.1[1,2] 1ns 250mb |L1.1| "
+ - "L1.1[1,2] 10ns 250mb |L1.1| "
- "L2 "
- - "L2.2[1,1000] 1ns 52mb |------------------------------------------L2.2------------------------------------------|"
- - "WARNING: file L1.1[1,2] 1ns 250mb exceeds soft limit 100mb by more than 50%"
+ - "L2.2[1,1000] 5ns 52mb |------------------------------------------L2.2------------------------------------------|"
+ - "WARNING: file L1.1[1,2] 10ns 250mb exceeds soft limit 100mb by more than 50%"
- "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. This may happen if a large amount of data has the same timestamp"
- "**** Final Output Files "
- "L1 "
- - "L1.1[1,2] 1ns 250mb |L1.1| "
+ - "L1.1[1,2] 10ns 250mb |L1.1| "
- "L2 "
- - "L2.2[1,1000] 1ns 52mb |------------------------------------------L2.2------------------------------------------|"
- - "WARNING: file L1.1[1,2] 1ns 250mb exceeds soft limit 100mb by more than 50%"
+ - "L2.2[1,1000] 5ns 52mb |------------------------------------------L2.2------------------------------------------|"
+ - "WARNING: file L1.1[1,2] 10ns 250mb exceeds soft limit 100mb by more than 50%"
"###
);
}
diff --git a/compactor2_test_utils/src/lib.rs b/compactor2_test_utils/src/lib.rs
index e23956f8f4..43049ef8dc 100644
--- a/compactor2_test_utils/src/lib.rs
+++ b/compactor2_test_utils/src/lib.rs
@@ -595,6 +595,11 @@ impl TestSetup {
.await
}
+ /// Get the parquet files stored in the catalog, including the soft-deleted ones
+ pub async fn list_by_table(&self) -> Vec<ParquetFile> {
+ self.catalog.list_by_table(self.table.table.id).await
+ }
+
/// Reads the specified parquet file out of object store
pub async fn read_parquet_file(&self, file: ParquetFile) -> Vec<RecordBatch> {
assert_eq!(file.table_id, self.table.table.id);
diff --git a/iox_catalog/src/interface.rs b/iox_catalog/src/interface.rs
index 6bf8a165fa..ce032d4226 100644
--- a/iox_catalog/src/interface.rs
+++ b/iox_catalog/src/interface.rs
@@ -679,6 +679,10 @@ pub trait ParquetFileRepo: Send + Sync {
/// [`to_delete`](ParquetFile::to_delete).
async fn list_by_table_not_to_delete(&mut self, table_id: TableId) -> Result<Vec<ParquetFile>>;
+ /// List all parquet files within a given table including those marked as [`to_delete`](ParquetFile::to_delete).
+ /// This is for debugging purposes
+ async fn list_by_table(&mut self, table_id: TableId) -> Result<Vec<ParquetFile>>;
+
/// Delete all parquet files that were marked to be deleted earlier than the specified time.
/// Returns the deleted records.
async fn delete_old(&mut self, older_than: Timestamp) -> Result<Vec<ParquetFile>>;
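For context, here is a minimal usage sketch of the new `list_by_table` debug listing. This sketch is not part of the commit above; it assumes the `Catalog` -> `repositories()` -> `parquet_files()` access pattern used elsewhere in this diff, and the helper name `debug_dump_table_files` is hypothetical.

use std::sync::Arc;

use data_types::TableId;
use iox_catalog::interface::Catalog;

/// Hypothetical helper: list every parquet file for a table, including
/// soft-deleted ones, and report how many are pending deletion.
async fn debug_dump_table_files(catalog: Arc<dyn Catalog>, table_id: TableId) {
    let mut repos = catalog.repositories().await;

    // `list_by_table` returns files regardless of their `to_delete` marker.
    let files = repos
        .parquet_files()
        .list_by_table(table_id)
        .await
        .expect("listing parquet files");

    let soft_deleted = files.iter().filter(|f| f.to_delete.is_some()).count();
    println!(
        "table {:?}: {} files total, {} soft-deleted",
        table_id,
        files.len(),
        soft_deleted
    );
}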
@@ -2743,6 +2747,15 @@ pub(crate) mod test_helpers {
assert_matches!(deleted_files.as_slice(), []);
assert!(repos.parquet_files().exist(parquet_file.id).await.unwrap());
+ // test list_by_table that includes soft-deleted file
+ // at this time the file is not soft-deleted yet and will be included in the returned list
+ let files = repos
+ .parquet_files()
+ .list_by_table(parquet_file.table_id)
+ .await
+ .unwrap();
+ assert_eq!(files.len(), 1);
+
// verify to_delete can be updated to a timestamp
repos
.parquet_files()
@@ -2757,6 +2770,15 @@ pub(crate) mod test_helpers {
let marked_deleted = files.first().unwrap();
assert!(marked_deleted.to_delete.is_some());
+ // test list_by_table that includes soft-deleted file
+ // at this time the file is soft-deleted and will be included in the returned list
+ let files = repos
+ .parquet_files()
+ .list_by_table(parquet_file.table_id)
+ .await
+ .unwrap();
+ assert_eq!(files.len(), 1);
+
// File is not deleted if it was marked to be deleted after the specified time
let before_deleted = Timestamp::new(
(catalog.time_provider().now() - Duration::from_secs(100)).timestamp_nanos(),
@@ -2769,12 +2791,31 @@ pub(crate) mod test_helpers {
assert!(deleted_files.is_empty());
assert!(repos.parquet_files().exist(parquet_file.id).await.unwrap());
+ // test list_by_table that includes soft-deleted file
+ // at this time the file is not actually hard deleted yet and stays soft-deleted
+ // and will be returned in the list
+ let files = repos
+ .parquet_files()
+ .list_by_table(parquet_file.table_id)
+ .await
+ .unwrap();
+ assert_eq!(files.len(), 1);
+
// File is deleted if it was marked to be deleted before the specified time
let deleted_files = repos.parquet_files().delete_old(older_than).await.unwrap();
assert_eq!(deleted_files.len(), 1);
assert_eq!(marked_deleted, &deleted_files[0]);
assert!(!repos.parquet_files().exist(parquet_file.id).await.unwrap());
+ // test list_by_table that includes soft-deleted file
+ // at this time the file is hard deleted -> the returned list is empty
+ let files = repos
+ .parquet_files()
+ .list_by_table(parquet_file.table_id)
+ .await
+ .unwrap();
+ assert_eq!(files.len(), 0);
+
// test list_by_table_not_to_delete
let files = repos
.parquet_files()
@@ -2789,6 +2830,16 @@ pub(crate) mod test_helpers {
.unwrap();
assert_eq!(files, vec![other_file.clone()]);
+ // test list_by_table
+ println!("parquet_file.table_id = {}", parquet_file.table_id);
+ let files = repos
+ .parquet_files()
+ // .list_by_table(parquet_file.table_id) // todo: tables of deleted files
+ .list_by_table(other_file.table_id)
+ .await
+ .unwrap();
+ assert_eq!(files.len(), 1);
+
// test list_by_namespace_not_to_delete
let namespace2 = repos
.namespaces()
diff --git a/iox_catalog/src/mem.rs b/iox_catalog/src/mem.rs
index 4e196960e3..7915eed462 100644
--- a/iox_catalog/src/mem.rs
+++ b/iox_catalog/src/mem.rs
@@ -1291,6 +1291,18 @@ impl ParquetFileRepo for MemTxn {
Ok(parquet_files)
}
+ async fn list_by_table(&mut self, table_id: TableId) -> Result<Vec<ParquetFile>> {
+ let stage = self.stage();
+
+ let parquet_files: Vec<_> = stage
+ .parquet_files
+ .iter()
+ .filter(|f| table_id == f.table_id)
+ .cloned()
+ .collect();
+ Ok(parquet_files)
+ }
+
async fn delete_old(&mut self, older_than: Timestamp) -> Result<Vec<ParquetFile>> {
let stage = self.stage();
diff --git a/iox_catalog/src/metrics.rs b/iox_catalog/src/metrics.rs
index 60d2a91214..20f213884c 100644
--- a/iox_catalog/src/metrics.rs
+++ b/iox_catalog/src/metrics.rs
@@ -281,6 +281,7 @@ decorate!(
"parquet_list_by_shard_greater_than" = list_by_shard_greater_than(&mut self, shard_id: ShardId, sequence_number: SequenceNumber) -> Result<Vec<ParquetFile>>;
"parquet_list_by_namespace_not_to_delete" = list_by_namespace_not_to_delete(&mut self, namespace_id: NamespaceId) -> Result<Vec<ParquetFile>>;
"parquet_list_by_table_not_to_delete" = list_by_table_not_to_delete(&mut self, table_id: TableId) -> Result<Vec<ParquetFile>>;
+ "parquet_list_by_table" = list_by_table(&mut self, table_id: TableId) -> Result<Vec<ParquetFile>>;
"parquet_delete_old" = delete_old(&mut self, older_than: Timestamp) -> Result<Vec<ParquetFile>>;
"parquet_delete_old_ids_only" = delete_old_ids_only(&mut self, older_than: Timestamp) -> Result<Vec<ParquetFileId>>;
"parquet_list_by_partition_not_to_delete" = list_by_partition_not_to_delete(&mut self, partition_id: PartitionId) -> Result<Vec<ParquetFile>>;
diff --git a/iox_catalog/src/postgres.rs b/iox_catalog/src/postgres.rs
index 0548fd2be3..c59c8886ba 100644
--- a/iox_catalog/src/postgres.rs
+++ b/iox_catalog/src/postgres.rs
@@ -1914,6 +1914,24 @@ WHERE table_id = $1 AND to_delete IS NULL;
.map_err(|e| Error::SqlxError { source: e })
}
+ async fn list_by_table(&mut self, table_id: TableId) -> Result<Vec<ParquetFile>> {
+ // Deliberately doesn't use `SELECT *` to avoid the performance hit of fetching the large
+ // `parquet_metadata` column!!
+ sqlx::query_as::<_, ParquetFile>(
+ r#"
+SELECT id, shard_id, namespace_id, table_id, partition_id, object_store_id,
+ max_sequence_number, min_time, max_time, to_delete, file_size_bytes,
+ row_count, compaction_level, created_at, column_set, max_l0_created_at
+FROM parquet_file
+WHERE table_id = $1;
+ "#,
+ )
+ .bind(table_id) // $1
+ .fetch_all(&mut self.inner)
+ .await
+ .map_err(|e| Error::SqlxError { source: e })
+ }
+
async fn delete_old(&mut self, older_than: Timestamp) -> Result<Vec<ParquetFile>> {
sqlx::query_as::<_, ParquetFile>(
r#"
diff --git a/iox_catalog/src/sqlite.rs b/iox_catalog/src/sqlite.rs
index 8928d0c8e8..38e4149860 100644
--- a/iox_catalog/src/sqlite.rs
+++ b/iox_catalog/src/sqlite.rs
@@ -1797,6 +1797,27 @@ WHERE table_id = $1 AND to_delete IS NULL;
.collect())
}
+ async fn list_by_table(&mut self, table_id: TableId) -> Result<Vec<ParquetFile>> {
+ // Deliberately doesn't use `SELECT *` to avoid the performance hit of fetching the large
+ // `parquet_metadata` column!!
+ Ok(sqlx::query_as::<_, ParquetFilePod>(
+ r#"
+SELECT id, shard_id, namespace_id, table_id, partition_id, object_store_id,
+ max_sequence_number, min_time, max_time, to_delete, file_size_bytes,
+ row_count, compaction_level, created_at, column_set, max_l0_created_at
+FROM parquet_file
+WHERE table_id = $1;
+ "#,
+ )
+ .bind(table_id) // $1
+ .fetch_all(self.inner.get_mut())
+ .await
+ .map_err(|e| Error::SqlxError { source: e })?
+ .into_iter()
+ .map(Into::into)
+ .collect())
+ }
+
async fn delete_old(&mut self, older_than: Timestamp) -> Result<Vec<ParquetFile>> {
Ok(sqlx::query_as::<_, ParquetFilePod>(
r#"
diff --git a/iox_tests/src/catalog.rs b/iox_tests/src/catalog.rs
index 7e6c660f4f..e7b488911d 100644
--- a/iox_tests/src/catalog.rs
+++ b/iox_tests/src/catalog.rs
@@ -277,6 +277,17 @@ impl TestCatalog {
.unwrap()
}
+ /// List all files, including the soft-deleted ones
+ pub async fn list_by_table(self: &Arc<Self>, table_id: TableId) -> Vec<ParquetFile> {
+ self.catalog
+ .repositories()
+ .await
+ .parquet_files()
+ .list_by_table(table_id)
+ .await
+ .unwrap()
+ }
+
/// Add a partition into skipped compaction
pub async fn add_to_skipped_compaction(
self: &Arc<Self>,
|
40865e011c2ae02eaf8eca707433d2067f6cdde0
|
Joe-Blount
|
2023-06-26 16:21:24
|
compactor loop on L1 files (#8082)
|
* chore: suppress insta run output on some long tests
* fix: prevent L1 compaction looping
* chore: insta updates from prior commit
* chore: address comments
| null |
fix: compactor loop on L1 files (#8082)
* chore: suppress insta run output on some long tests
* fix: prevent L1 compaction looping
* chore: insta updates from prior commit
* chore: address comments
|
diff --git a/compactor/src/components/round_info_source/mod.rs b/compactor/src/components/round_info_source/mod.rs
index 271c768108..6d6ab2f3d4 100644
--- a/compactor/src/components/round_info_source/mod.rs
+++ b/compactor/src/components/round_info_source/mod.rs
@@ -1,8 +1,10 @@
use std::{
+ cmp::max,
fmt::{Debug, Display},
sync::Arc,
};
+use crate::components::split_or_compact::start_level_files_to_split::split_into_chains;
use async_trait::async_trait;
use data_types::{CompactionLevel, ParquetFile, Timestamp};
use observability_deps::tracing::debug;
@@ -102,12 +104,44 @@ impl LevelBasedRoundInfo {
// plan, run a pre-phase to reduce the number of files first
let num_overlapped_files = get_num_overlapped_files(start_level_files, next_level_files);
if num_start_level + num_overlapped_files > self.max_num_files_per_plan {
+ // This scenario meets the simple criteria of start level files + their overlaps are lots of files.
+ // But ManySmallFiles implies we must compact only within the start level to reduce the quantity of
+ // start level files. There are several reasons why that might be unhelpful.
+
+ // Reason 1: Maybe it's many LARGE files, making reduction of file count in the start level impossible.
if size_start_level / num_start_level
> self.max_total_file_size_per_plan / self.max_num_files_per_plan
{
// Average start level file size is more than the average implied by max bytes & files per plan.
// Even though there are "many files", this is not "many small files".
- // There isn't much (perhaps not any) file reduction to be done, so don't try.
+ // There isn't much (perhaps not any) file reduction to be done, and attempting it can get us stuck
+ // in a loop.
+ return false;
+ }
+
+ // Reason 2: Maybe there are so many start level files because we did a bunch of splits.
+ // Note that we'll do splits to ensure each start level file overlaps at most one target level file.
+ // If the prior round did that, and now we declare this ManySmallFiles, which forces compactions
+ // within the start level, we'll undo the splits performed in the prior round, which can get us
+ // stuck in a loop.
+ let chains = split_into_chains(files.to_vec());
+ let mut max_target_level_files: usize = 0;
+ let mut max_chain_len: usize = 0;
+ for chain in chains {
+ let target_file_cnt = chain
+ .iter()
+ .filter(|f| f.compaction_level == start_level.next())
+ .count();
+ max_target_level_files = max(max_target_level_files, target_file_cnt);
+
+ let chain_len = chain.len();
+ max_chain_len = max(max_chain_len, chain_len);
+ }
+ if max_target_level_files <= 1 && max_chain_len <= self.max_num_files_per_plan {
+ // All of our start level files overlap with at most one target level file. If the prior round did
+ // splits to cause this, declaring this a ManySmallFiles case can lead to an endless loop.
+ // If we got lucky and this happened without splits, declaring this ManySmallFiles will waste
+ // our good fortune.
return false;
}
return true;
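For reference, here is a standalone sketch (not part of the diff) restating the two guards described in the comments above with simplified, hypothetical types and names; the real code works on `ParquetFile` chains produced by `split_into_chains`.

use std::cmp::max;

/// Reason 1 ("many LARGE files"): the average start-level file size already
/// exceeds the average implied by the per-plan byte and file-count limits,
/// so compacting within the start level cannot meaningfully reduce file count.
fn avg_exceeds_plan_avg(
    size_start_level: usize,
    num_start_level: usize,
    max_total_file_size_per_plan: usize,
    max_num_files_per_plan: usize,
) -> bool {
    size_start_level / num_start_level > max_total_file_size_per_plan / max_num_files_per_plan
}

/// Simplified stand-in for an overlapping chain of files: how many
/// target-level files it touches and how many files it contains in total.
struct Chain {
    target_level_files: usize,
    len: usize,
}

/// Reason 2 ("splits already done"): every start-level file overlaps at most
/// one target-level file and no chain exceeds the per-plan file limit, so
/// declaring ManySmallFiles would just undo the prior round's splits.
fn splits_already_done(chains: &[Chain], max_num_files_per_plan: usize) -> bool {
    let mut max_target_level_files = 0;
    let mut max_chain_len = 0;
    for chain in chains {
        max_target_level_files = max(max_target_level_files, chain.target_level_files);
        max_chain_len = max(max_chain_len, chain.len);
    }
    max_target_level_files <= 1 && max_chain_len <= max_num_files_per_plan
}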
diff --git a/compactor/src/components/split_or_compact/files_to_compact.rs b/compactor/src/components/split_or_compact/files_to_compact.rs
index 0da9f99920..104460deaf 100644
--- a/compactor/src/components/split_or_compact/files_to_compact.rs
+++ b/compactor/src/components/split_or_compact/files_to_compact.rs
@@ -142,6 +142,14 @@ pub fn limit_files_to_compact(
} else {
files_to_keep.push(file);
}
+ // TODO(maybe): See matching comment in stuck.rs/stuck_l1
+ // It's possible we split a few L1s so they don't overlap with too many L2s, then decided to compact just
+ // a few of them above. Now this break will cause us to leave the rest of the L1s we split to wait for
+ // the next round that may never come. The few we're compacting now may be enough to make the L1s fall
+ // below the threshold for compacting another round.
+ // We could set something here that's passed all the way up to the loop in `try_compact_partition` that
+ // skips the partition filter and forces it to do one more round because this round was stopped prematurely.
+ // That flag would mean: 'I'm in the middle of something, so ignore the continue criteria and let me finish'
break;
}
}
diff --git a/compactor/src/driver.rs b/compactor/src/driver.rs
index c7160ba7f9..7b3a6f8398 100644
--- a/compactor/src/driver.rs
+++ b/compactor/src/driver.rs
@@ -313,7 +313,7 @@ async fn execute_branch(
split_or_compact,
} = files_to_make_progress_on;
- // Compact
+ // Compact & Split
let created_file_params = run_plans(
split_or_compact.clone(),
&partition_info,
diff --git a/compactor/tests/layouts/backfill.rs b/compactor/tests/layouts/backfill.rs
index 26105dbd88..11fac2c66b 100644
--- a/compactor/tests/layouts/backfill.rs
+++ b/compactor/tests/layouts/backfill.rs
@@ -879,57 +879,10 @@ async fn random_backfill_over_l2s() {
- "Committing partition 1:"
- " Soft Deleting 1 files: L1.99"
- " Creating 2 files"
- - "**** Simulation run 23, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[612, 801]). 10 Input Files, 299mb total:"
+ - "**** Simulation run 23, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[437, 497]). 4 Input Files, 204mb total:"
- "L1 "
- - "L1.119[423,452] 1.05us 15mb|L1.119| "
- - "L1.120[453,499] 1.05us 25mb |L1.120| "
- - "L1.100[500,599] 1.05us 52mb |---L1.100----| "
- - "L1.101[600,611] 1.05us 7mb |L1.101| "
- - "L1.97[612,699] 1.05us 47mb |---L1.97---| "
- - "L1.98[700,733] 1.05us 18mb |L1.98| "
- - "L1.108[734,799] 1.05us 35mb |-L1.108-| "
- - "L1.109[800,899] 1.05us 53mb |---L1.109----| "
- - "L1.110[900,922] 1.05us 13mb |L1.110| "
- - "L1.96[923,986] 1.05us 34mb |-L1.96--| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 299mb total:"
- - "L1 "
- - "L1.?[423,612] 1.05us 101mb|------------L1.?------------| "
- - "L1.?[613,801] 1.05us 100mb |------------L1.?------------| "
- - "L1.?[802,986] 1.05us 99mb |-----------L1.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 10 files: L1.96, L1.97, L1.98, L1.100, L1.101, L1.108, L1.109, L1.110, L1.119, L1.120"
- - " Creating 3 files"
- - "**** Simulation run 24, type=split(ReduceOverlap)(split_times=[899]). 1 Input Files, 99mb total:"
- - "L1, all files 99mb "
- - "L1.123[802,986] 1.05us |-----------------------------------------L1.123-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 99mb total:"
- - "L1 "
- - "L1.?[802,899] 1.05us 52mb|--------------------L1.?---------------------| "
- - "L1.?[900,986] 1.05us 47mb |------------------L1.?------------------| "
- - "**** Simulation run 25, type=split(ReduceOverlap)(split_times=[699, 799]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.122[613,801] 1.05us |-----------------------------------------L1.122-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[613,699] 1.05us 46mb|-----------------L1.?------------------| "
- - "L1.?[700,799] 1.05us 53mb |--------------------L1.?---------------------| "
- - "L1.?[800,801] 1.05us 2mb |L1.?|"
- - "**** Simulation run 26, type=split(ReduceOverlap)(split_times=[452, 499, 599]). 1 Input Files, 101mb total:"
- - "L1, all files 101mb "
- - "L1.121[423,612] 1.05us |-----------------------------------------L1.121-----------------------------------------|"
- - "**** 4 Output Files (parquet_file_id not yet assigned), 101mb total:"
- - "L1 "
- - "L1.?[423,452] 1.05us 15mb|---L1.?----| "
- - "L1.?[453,499] 1.05us 24mb |-------L1.?--------| "
- - "L1.?[500,599] 1.05us 53mb |--------------------L1.?---------------------| "
- - "L1.?[600,612] 1.05us 8mb |L1.?|"
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L1.121, L1.122, L1.123"
- - " Creating 9 files"
- - "**** Simulation run 27, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[437, 497]). 4 Input Files, 204mb total:"
- - "L1 "
- - "L1.129[423,452] 1.05us 15mb |------L1.129-------| "
- - "L1.130[453,499] 1.05us 24mb |------------L1.130-------------| "
+ - "L1.119[423,452] 1.05us 15mb |------L1.119-------| "
+ - "L1.120[453,499] 1.05us 25mb |------------L1.120-------------| "
- "L2 "
- "L2.117[377,452] 1.05us 100mb|-----------------------L2.117------------------------| "
- "L2.118[453,499] 1.05us 64mb |------------L2.118-------------| "
@@ -939,68 +892,68 @@ async fn random_backfill_over_l2s() {
- "L2.?[438,497] 1.05us 99mb |------------------L2.?-------------------| "
- "L2.?[498,499] 1.05us 5mb |L2.?|"
- "Committing partition 1:"
- - " Soft Deleting 4 files: L2.117, L2.118, L1.129, L1.130"
+ - " Soft Deleting 4 files: L2.117, L2.118, L1.119, L1.120"
- " Creating 3 files"
- - "**** Simulation run 28, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[577, 654]). 4 Input Files, 261mb total:"
+ - "**** Simulation run 24, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[577, 654]). 4 Input Files, 259mb total:"
- "L1 "
- - "L1.131[500,599] 1.05us 53mb|------------------L1.131------------------| "
- - "L1.132[600,612] 1.05us 8mb |L1.132| "
+ - "L1.100[500,599] 1.05us 52mb|------------------L1.100------------------| "
+ - "L1.101[600,611] 1.05us 7mb |L1.101| "
- "L2 "
- "L2.6[500,599] 599ns 100mb|-------------------L2.6-------------------| "
- "L2.7[600,699] 699ns 100mb |-------------------L2.7-------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 261mb total:"
+ - "**** 3 Output Files (parquet_file_id not yet assigned), 259mb total:"
- "L2 "
- - "L2.?[500,577] 1.05us 101mb|--------------L2.?--------------| "
- - "L2.?[578,654] 1.05us 100mb |--------------L2.?--------------| "
+ - "L2.?[500,577] 1.05us 100mb|--------------L2.?--------------| "
+ - "L2.?[578,654] 1.05us 99mb |--------------L2.?--------------| "
- "L2.?[655,699] 1.05us 60mb |------L2.?-------| "
- "Committing partition 1:"
- - " Soft Deleting 4 files: L2.6, L2.7, L1.131, L1.132"
+ - " Soft Deleting 4 files: L2.6, L2.7, L1.100, L1.101"
- " Creating 3 files"
- - "**** Simulation run 29, type=split(ReduceOverlap)(split_times=[654]). 1 Input Files, 46mb total:"
- - "L1, all files 46mb "
- - "L1.126[613,699] 1.05us |-----------------------------------------L1.126-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 46mb total:"
+ - "**** Simulation run 25, type=split(ReduceOverlap)(split_times=[654]). 1 Input Files, 47mb total:"
+ - "L1, all files 47mb "
+ - "L1.97[612,699] 1.05us |-----------------------------------------L1.97------------------------------------------|"
+ - "**** 2 Output Files (parquet_file_id not yet assigned), 47mb total:"
- "L1 "
- - "L1.?[613,654] 1.05us 22mb|------------------L1.?------------------| "
- - "L1.?[655,699] 1.05us 24mb |--------------------L1.?--------------------| "
+ - "L1.?[612,654] 1.05us 23mb|------------------L1.?-------------------| "
+ - "L1.?[655,699] 1.05us 24mb |-------------------L1.?--------------------| "
- "Committing partition 1:"
- - " Soft Deleting 1 files: L1.126"
+ - " Soft Deleting 1 files: L1.97"
- " Creating 2 files"
- - "**** Simulation run 30, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[637, 696]). 4 Input Files, 206mb total:"
+ - "**** Simulation run 26, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[637, 696]). 4 Input Files, 206mb total:"
- "L1 "
- - "L1.139[613,654] 1.05us 22mb |-----------L1.139-----------| "
- - "L1.140[655,699] 1.05us 24mb |------------L1.140------------| "
+ - "L1.127[612,654] 1.05us 23mb |-----------L1.127------------| "
+ - "L1.128[655,699] 1.05us 24mb |------------L1.128------------| "
- "L2 "
- - "L2.137[578,654] 1.05us 100mb|------------------------L2.137------------------------| "
- - "L2.138[655,699] 1.05us 60mb |------------L2.138------------| "
+ - "L2.125[578,654] 1.05us 99mb|------------------------L2.125------------------------| "
+ - "L2.126[655,699] 1.05us 60mb |------------L2.126------------| "
- "**** 3 Output Files (parquet_file_id not yet assigned), 206mb total:"
- "L2 "
- "L2.?[578,637] 1.05us 100mb|------------------L2.?-------------------| "
- "L2.?[638,696] 1.05us 99mb |------------------L2.?-------------------| "
- "L2.?[697,699] 1.05us 7mb |L2.?|"
- "Committing partition 1:"
- - " Soft Deleting 4 files: L2.137, L2.138, L1.139, L1.140"
+ - " Soft Deleting 4 files: L2.125, L2.126, L1.127, L1.128"
- " Creating 3 files"
- - "**** Simulation run 31, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[779, 858]). 4 Input Files, 254mb total:"
+ - "**** Simulation run 27, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[765]). 3 Input Files, 153mb total:"
- "L1 "
- - "L1.127[700,799] 1.05us 53mb|------------------L1.127------------------| "
- - "L1.128[800,801] 1.05us 2mb |L1.128| "
+ - "L1.98[700,733] 1.05us 18mb|-----------L1.98-----------| "
+ - "L1.108[734,799] 1.05us 35mb |-------------------------L1.108--------------------------| "
- "L2 "
- - "L2.8[700,799] 799ns 100mb|-------------------L2.8-------------------| "
- - "L2.9[800,899] 899ns 100mb |-------------------L2.9-------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 254mb total:"
+ - "L2.8[700,799] 799ns 100mb|-----------------------------------------L2.8------------------------------------------| "
+ - "**** 2 Output Files (parquet_file_id not yet assigned), 153mb total:"
- "L2 "
- - "L2.?[700,779] 1.05us 101mb|--------------L2.?---------------| "
- - "L2.?[780,858] 1.05us 100mb |--------------L2.?---------------| "
- - "L2.?[859,899] 1.05us 54mb |------L2.?------| "
+ - "L2.?[700,765] 1.05us 100mb|--------------------------L2.?---------------------------| "
+ - "L2.?[766,799] 1.05us 53mb |-----------L2.?------------| "
- "Committing partition 1:"
- - " Soft Deleting 4 files: L2.8, L2.9, L1.127, L1.128"
- - " Creating 3 files"
- - "**** Final Output Files (4.47gb written)"
+ - " Soft Deleting 3 files: L2.8, L1.98, L1.108"
+ - " Creating 2 files"
+ - "**** Final Output Files (3.78gb written)"
- "L1 "
- - "L1.124[802,899] 1.05us 52mb |L1.124| "
- - "L1.125[900,986] 1.05us 47mb |L1.125| "
+ - "L1.96[923,986] 1.05us 34mb |L1.96|"
+ - "L1.109[800,899] 1.05us 53mb |L1.109| "
+ - "L1.110[900,922] 1.05us 13mb |L1.110| "
- "L2 "
+ - "L2.9[800,899] 899ns 100mb |-L2.9-| "
- "L2.10[900,999] 999ns 100mb |L2.10-| "
- "L2.111[0,71] 1.05us 101mb|L2.111| "
- "L2.112[72,142] 1.05us 99mb |L2.112| "
@@ -1008,16 +961,15 @@ async fn random_backfill_over_l2s() {
- "L2.114[200,265] 1.05us 100mb |L2.114| "
- "L2.115[266,299] 1.05us 53mb |L2.115| "
- "L2.116[300,376] 1.05us 101mb |L2.116| "
- - "L2.133[377,437] 1.05us 100mb |L2.133| "
- - "L2.134[438,497] 1.05us 99mb |L2.134| "
- - "L2.135[498,499] 1.05us 5mb |L2.135| "
- - "L2.136[500,577] 1.05us 101mb |L2.136| "
- - "L2.141[578,637] 1.05us 100mb |L2.141| "
- - "L2.142[638,696] 1.05us 99mb |L2.142| "
- - "L2.143[697,699] 1.05us 7mb |L2.143| "
- - "L2.144[700,779] 1.05us 101mb |L2.144| "
- - "L2.145[780,858] 1.05us 100mb |L2.145| "
- - "L2.146[859,899] 1.05us 54mb |L2.146| "
+ - "L2.121[377,437] 1.05us 100mb |L2.121| "
+ - "L2.122[438,497] 1.05us 99mb |L2.122| "
+ - "L2.123[498,499] 1.05us 5mb |L2.123| "
+ - "L2.124[500,577] 1.05us 100mb |L2.124| "
+ - "L2.129[578,637] 1.05us 100mb |L2.129| "
+ - "L2.130[638,696] 1.05us 99mb |L2.130| "
+ - "L2.131[697,699] 1.05us 7mb |L2.131| "
+ - "L2.132[700,765] 1.05us 100mb |L2.132| "
+ - "L2.133[766,799] 1.05us 53mb |L2.133| "
"###
);
}
diff --git a/compactor/tests/layouts/many_files.rs b/compactor/tests/layouts/many_files.rs
index 3dca11825a..a7a68283a0 100644
--- a/compactor/tests/layouts/many_files.rs
+++ b/compactor/tests/layouts/many_files.rs
@@ -189,39 +189,30 @@ async fn many_l1_files_different_created_order() {
- "L1.2[31,40] 2ns |-------L1.2-------| "
- "L1.3[21,30] 3ns |-------L1.3-------| "
- "L1.4[41,50] 4ns |-------L1.4-------| "
- - "**** Simulation run 0, type=compact(ManySmallFiles). 2 Input Files, 5kb total:"
+ - "**** Simulation run 0, type=compact(FoundSubsetLessThanMaxCompactSize). 2 Input Files, 5kb total:"
- "L1, all files 3kb "
- "L1.1[11,20] 1ns |------------------L1.1------------------| "
- "L1.3[21,30] 3ns |------------------L1.3------------------| "
- "**** 1 Output Files (parquet_file_id not yet assigned), 5kb total:"
- - "L1, all files 5kb "
- - "L1.?[11,30] 3ns |------------------------------------------L1.?------------------------------------------|"
+ - "L2, all files 5kb "
+ - "L2.?[11,30] 3ns |------------------------------------------L2.?------------------------------------------|"
- "Committing partition 1:"
- " Soft Deleting 2 files: L1.1, L1.3"
- " Creating 1 files"
- - "**** Simulation run 1, type=compact(ManySmallFiles). 2 Input Files, 5kb total:"
+ - "**** Simulation run 1, type=compact(FoundSubsetLessThanMaxCompactSize). 2 Input Files, 5kb total:"
- "L1, all files 3kb "
- "L1.2[31,40] 2ns |------------------L1.2------------------| "
- "L1.4[41,50] 4ns |------------------L1.4------------------| "
- "**** 1 Output Files (parquet_file_id not yet assigned), 5kb total:"
- - "L1, all files 5kb "
- - "L1.?[31,50] 4ns |------------------------------------------L1.?------------------------------------------|"
+ - "L2, all files 5kb "
+ - "L2.?[31,50] 4ns |------------------------------------------L2.?------------------------------------------|"
- "Committing partition 1:"
- " Soft Deleting 2 files: L1.2, L1.4"
- " Creating 1 files"
- - "**** Simulation run 2, type=compact(FoundSubsetLessThanMaxCompactSize). 2 Input Files, 10kb total:"
- - "L1, all files 5kb "
- - "L1.5[11,30] 3ns |------------------L1.5-------------------| "
- - "L1.6[31,50] 4ns |------------------L1.6-------------------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 10kb total:"
- - "L2, all files 10kb "
- - "L2.?[11,50] 4ns |------------------------------------------L2.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L1.5, L1.6"
- - " Creating 1 files"
- - "**** Final Output Files (20kb written)"
- - "L2, all files 10kb "
- - "L2.7[11,50] 4ns |------------------------------------------L2.7------------------------------------------|"
+ - "**** Final Output Files (10kb written)"
+ - "L2, all files 5kb "
+ - "L2.5[11,30] 3ns |------------------L2.5-------------------| "
+ - "L2.6[31,50] 4ns |------------------L2.6-------------------| "
"###
);
}
@@ -297,39 +288,31 @@ async fn many_l0_files_different_created_order_non_overlap() {
- "L0.2[31,40] 2ns |-------L0.2-------| "
- "L0.3[21,30] 3ns |-------L0.3-------| "
- "L0.4[41,50] 4ns |-------L0.4-------| "
- - "**** Simulation run 0, type=compact(ManySmallFiles). 2 Input Files, 5kb total:"
+ - "**** Simulation run 0, type=compact(FoundSubsetLessThanMaxCompactSize). 2 Input Files, 5kb total:"
- "L0, all files 3kb "
- "L0.1[11,20] 1ns |----------L0.1-----------| "
- "L0.2[31,40] 2ns |----------L0.2-----------| "
- "**** 1 Output Files (parquet_file_id not yet assigned), 5kb total:"
- - "L0, all files 5kb "
- - "L0.?[11,40] 2ns |------------------------------------------L0.?------------------------------------------|"
+ - "L1, all files 5kb "
+ - "L1.?[11,40] 2ns |------------------------------------------L1.?------------------------------------------|"
- "Committing partition 1:"
- " Soft Deleting 2 files: L0.1, L0.2"
- " Creating 1 files"
- - "**** Simulation run 1, type=compact(ManySmallFiles). 2 Input Files, 5kb total:"
- - "L0, all files 3kb "
- - "L0.3[21,30] 3ns |----------L0.3-----------| "
- - "L0.4[41,50] 4ns |----------L0.4-----------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 5kb total:"
- - "L0, all files 5kb "
- - "L0.?[21,50] 4ns |------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L0.3, L0.4"
- - " Creating 1 files"
- - "**** Simulation run 2, type=compact(FoundSubsetLessThanMaxCompactSize). 2 Input Files, 10kb total:"
- - "L0, all files 5kb "
- - "L0.5[11,40] 2ns |------------------------------L0.5------------------------------| "
- - "L0.6[21,50] 4ns |------------------------------L0.6------------------------------| "
+ - "**** Simulation run 1, type=compact(FoundSubsetLessThanMaxCompactSize). 3 Input Files, 10kb total:"
+ - "L0 "
+ - "L0.3[21,30] 3ns 3kb |-------L0.3-------| "
+ - "L0.4[41,50] 4ns 3kb |-------L0.4-------| "
+ - "L1 "
+ - "L1.5[11,40] 2ns 5kb |------------------------------L1.5------------------------------| "
- "**** 1 Output Files (parquet_file_id not yet assigned), 10kb total:"
- "L1, all files 10kb "
- "L1.?[11,50] 4ns |------------------------------------------L1.?------------------------------------------|"
- "Committing partition 1:"
- - " Soft Deleting 2 files: L0.5, L0.6"
+ - " Soft Deleting 3 files: L0.3, L0.4, L1.5"
- " Creating 1 files"
- - "**** Final Output Files (20kb written)"
+ - "**** Final Output Files (15kb written)"
- "L1, all files 10kb "
- - "L1.7[11,50] 4ns |------------------------------------------L1.7------------------------------------------|"
+ - "L1.6[11,50] 4ns |------------------------------------------L1.6------------------------------------------|"
"###
);
}
@@ -2315,7 +2298,7 @@ async fn many_tiny_l1_files() {
- "L1.286[570,571] 286ns |L1.286|"
- "L1.287[572,573] 287ns |L1.287|"
- "L1.288[574,575] 288ns |L1.288|"
- - "**** Simulation run 0, type=compact(ManySmallFiles). 200 Input Files, 1mb total:"
+ - "**** Simulation run 0, type=compact(FoundSubsetLessThanMaxCompactSize). 200 Input Files, 1mb total:"
- "L1, all files 7kb "
- "L1.1[0,1] 1ns |L1.1| "
- "L1.2[2,3] 2ns |L1.2| "
@@ -2518,111 +2501,111 @@ async fn many_tiny_l1_files() {
- "L1.199[396,397] 199ns |L1.199|"
- "L1.200[398,399] 200ns |L1.200|"
- "**** 1 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L1, all files 1mb "
- - "L1.?[0,399] 200ns |------------------------------------------L1.?------------------------------------------|"
+ - "L2, all files 1mb "
+ - "L2.?[0,399] 200ns |------------------------------------------L2.?------------------------------------------|"
- "Committing partition 1:"
- " Soft Deleting 200 files: L1.1, L1.2, L1.3, L1.4, L1.5, L1.6, L1.7, L1.8, L1.9, L1.10, L1.11, L1.12, L1.13, L1.14, L1.15, L1.16, L1.17, L1.18, L1.19, L1.20, L1.21, L1.22, L1.23, L1.24, L1.25, L1.26, L1.27, L1.28, L1.29, L1.30, L1.31, L1.32, L1.33, L1.34, L1.35, L1.36, L1.37, L1.38, L1.39, L1.40, L1.41, L1.42, L1.43, L1.44, L1.45, L1.46, L1.47, L1.48, L1.49, L1.50, L1.51, L1.52, L1.53, L1.54, L1.55, L1.56, L1.57, L1.58, L1.59, L1.60, L1.61, L1.62, L1.63, L1.64, L1.65, L1.66, L1.67, L1.68, L1.69, L1.70, L1.71, L1.72, L1.73, L1.74, L1.75, L1.76, L1.77, L1.78, L1.79, L1.80, L1.81, L1.82, L1.83, L1.84, L1.85, L1.86, L1.87, L1.88, L1.89, L1.90, L1.91, L1.92, L1.93, L1.94, L1.95, L1.96, L1.97, L1.98, L1.99, L1.100, L1.101, L1.102, L1.103, L1.104, L1.105, L1.106, L1.107, L1.108, L1.109, L1.110, L1.111, L1.112, L1.113, L1.114, L1.115, L1.116, L1.117, L1.118, L1.119, L1.120, L1.121, L1.122, L1.123, L1.124, L1.125, L1.126, L1.127, L1.128, L1.129, L1.130, L1.131, L1.132, L1.133, L1.134, L1.135, L1.136, L1.137, L1.138, L1.139, L1.140, L1.141, L1.142, L1.143, L1.144, L1.145, L1.146, L1.147, L1.148, L1.149, L1.150, L1.151, L1.152, L1.153, L1.154, L1.155, L1.156, L1.157, L1.158, L1.159, L1.160, L1.161, L1.162, L1.163, L1.164, L1.165, L1.166, L1.167, L1.168, L1.169, L1.170, L1.171, L1.172, L1.173, L1.174, L1.175, L1.176, L1.177, L1.178, L1.179, L1.180, L1.181, L1.182, L1.183, L1.184, L1.185, L1.186, L1.187, L1.188, L1.189, L1.190, L1.191, L1.192, L1.193, L1.194, L1.195, L1.196, L1.197, L1.198, L1.199, L1.200"
- " Creating 1 files"
- - "**** Simulation run 1, type=compact(ManySmallFiles). 88 Input Files, 616kb total:"
+ - "**** Simulation run 1, type=compact(TotalSizeLessThanMaxCompactSize). 88 Input Files, 616kb total:"
- "L1, all files 7kb "
- - "L1.201[400,401] 201ns |L1.201| "
- - "L1.202[402,403] 202ns |L1.202| "
- - "L1.203[404,405] 203ns |L1.203| "
- - "L1.204[406,407] 204ns |L1.204| "
- - "L1.205[408,409] 205ns |L1.205| "
- - "L1.206[410,411] 206ns |L1.206| "
- - "L1.207[412,413] 207ns |L1.207| "
- - "L1.208[414,415] 208ns |L1.208| "
- - "L1.209[416,417] 209ns |L1.209| "
- - "L1.210[418,419] 210ns |L1.210| "
- - "L1.211[420,421] 211ns |L1.211| "
- - "L1.212[422,423] 212ns |L1.212| "
- - "L1.213[424,425] 213ns |L1.213| "
- - "L1.214[426,427] 214ns |L1.214| "
- - "L1.215[428,429] 215ns |L1.215| "
- - "L1.216[430,431] 216ns |L1.216| "
- - "L1.217[432,433] 217ns |L1.217| "
- - "L1.218[434,435] 218ns |L1.218| "
- - "L1.219[436,437] 219ns |L1.219| "
- - "L1.220[438,439] 220ns |L1.220| "
- - "L1.221[440,441] 221ns |L1.221| "
- - "L1.222[442,443] 222ns |L1.222| "
- - "L1.223[444,445] 223ns |L1.223| "
- - "L1.224[446,447] 224ns |L1.224| "
- - "L1.225[448,449] 225ns |L1.225| "
- - "L1.226[450,451] 226ns |L1.226| "
- - "L1.227[452,453] 227ns |L1.227| "
- - "L1.228[454,455] 228ns |L1.228| "
- - "L1.229[456,457] 229ns |L1.229| "
- - "L1.230[458,459] 230ns |L1.230| "
- - "L1.231[460,461] 231ns |L1.231| "
- - "L1.232[462,463] 232ns |L1.232| "
- - "L1.233[464,465] 233ns |L1.233| "
- - "L1.234[466,467] 234ns |L1.234| "
- - "L1.235[468,469] 235ns |L1.235| "
- - "L1.236[470,471] 236ns |L1.236| "
- - "L1.237[472,473] 237ns |L1.237| "
- - "L1.238[474,475] 238ns |L1.238| "
- - "L1.239[476,477] 239ns |L1.239| "
- - "L1.240[478,479] 240ns |L1.240| "
- - "L1.241[480,481] 241ns |L1.241| "
- - "L1.242[482,483] 242ns |L1.242| "
- - "L1.243[484,485] 243ns |L1.243| "
- - "L1.244[486,487] 244ns |L1.244| "
- - "L1.245[488,489] 245ns |L1.245| "
- - "L1.246[490,491] 246ns |L1.246| "
- - "L1.247[492,493] 247ns |L1.247| "
- - "L1.248[494,495] 248ns |L1.248| "
- - "L1.249[496,497] 249ns |L1.249| "
- - "L1.250[498,499] 250ns |L1.250| "
- - "L1.251[500,501] 251ns |L1.251| "
- - "L1.252[502,503] 252ns |L1.252| "
- - "L1.253[504,505] 253ns |L1.253| "
- - "L1.254[506,507] 254ns |L1.254| "
- - "L1.255[508,509] 255ns |L1.255| "
- - "L1.256[510,511] 256ns |L1.256| "
- - "L1.257[512,513] 257ns |L1.257| "
- - "L1.258[514,515] 258ns |L1.258| "
- - "L1.259[516,517] 259ns |L1.259| "
- - "L1.260[518,519] 260ns |L1.260| "
- - "L1.261[520,521] 261ns |L1.261| "
- - "L1.262[522,523] 262ns |L1.262| "
- - "L1.263[524,525] 263ns |L1.263| "
- - "L1.264[526,527] 264ns |L1.264| "
- - "L1.265[528,529] 265ns |L1.265| "
- - "L1.266[530,531] 266ns |L1.266| "
- - "L1.267[532,533] 267ns |L1.267| "
- - "L1.268[534,535] 268ns |L1.268| "
- - "L1.269[536,537] 269ns |L1.269| "
- - "L1.270[538,539] 270ns |L1.270| "
- - "L1.271[540,541] 271ns |L1.271| "
- - "L1.272[542,543] 272ns |L1.272| "
- - "L1.273[544,545] 273ns |L1.273| "
- - "L1.274[546,547] 274ns |L1.274| "
- - "L1.275[548,549] 275ns |L1.275| "
- - "L1.276[550,551] 276ns |L1.276| "
- - "L1.277[552,553] 277ns |L1.277| "
- - "L1.278[554,555] 278ns |L1.278| "
- - "L1.279[556,557] 279ns |L1.279| "
- - "L1.280[558,559] 280ns |L1.280| "
- - "L1.281[560,561] 281ns |L1.281|"
- - "L1.282[562,563] 282ns |L1.282|"
- - "L1.283[564,565] 283ns |L1.283|"
- - "L1.284[566,567] 284ns |L1.284|"
- - "L1.285[568,569] 285ns |L1.285|"
- - "L1.286[570,571] 286ns |L1.286|"
- - "L1.287[572,573] 287ns |L1.287|"
- "L1.288[574,575] 288ns |L1.288|"
+ - "L1.287[572,573] 287ns |L1.287|"
+ - "L1.286[570,571] 286ns |L1.286|"
+ - "L1.285[568,569] 285ns |L1.285|"
+ - "L1.284[566,567] 284ns |L1.284|"
+ - "L1.283[564,565] 283ns |L1.283|"
+ - "L1.282[562,563] 282ns |L1.282|"
+ - "L1.281[560,561] 281ns |L1.281|"
+ - "L1.280[558,559] 280ns |L1.280| "
+ - "L1.279[556,557] 279ns |L1.279| "
+ - "L1.278[554,555] 278ns |L1.278| "
+ - "L1.277[552,553] 277ns |L1.277| "
+ - "L1.276[550,551] 276ns |L1.276| "
+ - "L1.275[548,549] 275ns |L1.275| "
+ - "L1.274[546,547] 274ns |L1.274| "
+ - "L1.273[544,545] 273ns |L1.273| "
+ - "L1.272[542,543] 272ns |L1.272| "
+ - "L1.271[540,541] 271ns |L1.271| "
+ - "L1.270[538,539] 270ns |L1.270| "
+ - "L1.269[536,537] 269ns |L1.269| "
+ - "L1.268[534,535] 268ns |L1.268| "
+ - "L1.267[532,533] 267ns |L1.267| "
+ - "L1.266[530,531] 266ns |L1.266| "
+ - "L1.265[528,529] 265ns |L1.265| "
+ - "L1.264[526,527] 264ns |L1.264| "
+ - "L1.263[524,525] 263ns |L1.263| "
+ - "L1.262[522,523] 262ns |L1.262| "
+ - "L1.261[520,521] 261ns |L1.261| "
+ - "L1.260[518,519] 260ns |L1.260| "
+ - "L1.259[516,517] 259ns |L1.259| "
+ - "L1.258[514,515] 258ns |L1.258| "
+ - "L1.257[512,513] 257ns |L1.257| "
+ - "L1.256[510,511] 256ns |L1.256| "
+ - "L1.255[508,509] 255ns |L1.255| "
+ - "L1.254[506,507] 254ns |L1.254| "
+ - "L1.253[504,505] 253ns |L1.253| "
+ - "L1.252[502,503] 252ns |L1.252| "
+ - "L1.251[500,501] 251ns |L1.251| "
+ - "L1.250[498,499] 250ns |L1.250| "
+ - "L1.249[496,497] 249ns |L1.249| "
+ - "L1.248[494,495] 248ns |L1.248| "
+ - "L1.247[492,493] 247ns |L1.247| "
+ - "L1.246[490,491] 246ns |L1.246| "
+ - "L1.245[488,489] 245ns |L1.245| "
+ - "L1.244[486,487] 244ns |L1.244| "
+ - "L1.243[484,485] 243ns |L1.243| "
+ - "L1.242[482,483] 242ns |L1.242| "
+ - "L1.241[480,481] 241ns |L1.241| "
+ - "L1.240[478,479] 240ns |L1.240| "
+ - "L1.239[476,477] 239ns |L1.239| "
+ - "L1.238[474,475] 238ns |L1.238| "
+ - "L1.237[472,473] 237ns |L1.237| "
+ - "L1.236[470,471] 236ns |L1.236| "
+ - "L1.235[468,469] 235ns |L1.235| "
+ - "L1.234[466,467] 234ns |L1.234| "
+ - "L1.233[464,465] 233ns |L1.233| "
+ - "L1.232[462,463] 232ns |L1.232| "
+ - "L1.231[460,461] 231ns |L1.231| "
+ - "L1.230[458,459] 230ns |L1.230| "
+ - "L1.229[456,457] 229ns |L1.229| "
+ - "L1.228[454,455] 228ns |L1.228| "
+ - "L1.227[452,453] 227ns |L1.227| "
+ - "L1.226[450,451] 226ns |L1.226| "
+ - "L1.225[448,449] 225ns |L1.225| "
+ - "L1.224[446,447] 224ns |L1.224| "
+ - "L1.223[444,445] 223ns |L1.223| "
+ - "L1.222[442,443] 222ns |L1.222| "
+ - "L1.221[440,441] 221ns |L1.221| "
+ - "L1.220[438,439] 220ns |L1.220| "
+ - "L1.219[436,437] 219ns |L1.219| "
+ - "L1.218[434,435] 218ns |L1.218| "
+ - "L1.217[432,433] 217ns |L1.217| "
+ - "L1.216[430,431] 216ns |L1.216| "
+ - "L1.215[428,429] 215ns |L1.215| "
+ - "L1.214[426,427] 214ns |L1.214| "
+ - "L1.213[424,425] 213ns |L1.213| "
+ - "L1.212[422,423] 212ns |L1.212| "
+ - "L1.211[420,421] 211ns |L1.211| "
+ - "L1.210[418,419] 210ns |L1.210| "
+ - "L1.209[416,417] 209ns |L1.209| "
+ - "L1.208[414,415] 208ns |L1.208| "
+ - "L1.207[412,413] 207ns |L1.207| "
+ - "L1.206[410,411] 206ns |L1.206| "
+ - "L1.205[408,409] 205ns |L1.205| "
+ - "L1.204[406,407] 204ns |L1.204| "
+ - "L1.203[404,405] 203ns |L1.203| "
+ - "L1.202[402,403] 202ns |L1.202| "
+ - "L1.201[400,401] 201ns |L1.201| "
- "**** 1 Output Files (parquet_file_id not yet assigned), 616kb total:"
- - "L1, all files 616kb "
- - "L1.?[400,575] 288ns |------------------------------------------L1.?------------------------------------------|"
+ - "L2, all files 616kb "
+ - "L2.?[400,575] 288ns |------------------------------------------L2.?------------------------------------------|"
- "Committing partition 1:"
- " Soft Deleting 88 files: L1.201, L1.202, L1.203, L1.204, L1.205, L1.206, L1.207, L1.208, L1.209, L1.210, L1.211, L1.212, L1.213, L1.214, L1.215, L1.216, L1.217, L1.218, L1.219, L1.220, L1.221, L1.222, L1.223, L1.224, L1.225, L1.226, L1.227, L1.228, L1.229, L1.230, L1.231, L1.232, L1.233, L1.234, L1.235, L1.236, L1.237, L1.238, L1.239, L1.240, L1.241, L1.242, L1.243, L1.244, L1.245, L1.246, L1.247, L1.248, L1.249, L1.250, L1.251, L1.252, L1.253, L1.254, L1.255, L1.256, L1.257, L1.258, L1.259, L1.260, L1.261, L1.262, L1.263, L1.264, L1.265, L1.266, L1.267, L1.268, L1.269, L1.270, L1.271, L1.272, L1.273, L1.274, L1.275, L1.276, L1.277, L1.278, L1.279, L1.280, L1.281, L1.282, L1.283, L1.284, L1.285, L1.286, L1.287, L1.288"
- " Creating 1 files"
- "**** Final Output Files (2mb written)"
- - "L1 "
- - "L1.289[0,399] 200ns 1mb |---------------------------L1.289---------------------------| "
- - "L1.290[400,575] 288ns 616kb |---------L1.290----------| "
+ - "L2 "
+ - "L2.289[0,399] 200ns 1mb |---------------------------L2.289---------------------------| "
+ - "L2.290[400,575] 288ns 616kb |---------L2.290----------| "
"###
);
}
@@ -3670,24 +3653,10 @@ async fn l0s_almost_needing_vertical_split() {
.with_max_num_files_per_plan(max_files_per_plan as usize)
.with_max_desired_file_size_bytes(max_file_size)
.with_partition_timeout(Duration::from_millis(10000))
+ .with_suppress_run_output() // remove this to debug
.build()
.await;
- // // L1: 20 non overlapping files (more than with_min_num_l1_files_to_compact)
- // for i in 0..20 {
- // setup
- // .partition
- // .create_parquet_file(
- // parquet_builder()
- // .with_min_time(i * 2)
- // .with_max_time(i * 2 + 1)
- // .with_compaction_level(CompactionLevel::FileNonOverlapped)
- // // content in L1 files ingested before all available L0
- // .with_max_l0_created_at(Time::from_timestamp_nanos(i + 1))
- // .with_file_size_bytes(actual_file_size),
- // )
- // .await;
- // }
// L0: a few small files that overlap
for i in 0..1000 {
setup
@@ -4709,7637 +4678,6 @@ async fn l0s_almost_needing_vertical_split() {
- "L0.998[24,100] 1.02us |-----------------------------------------L0.998-----------------------------------------|"
- "L0.999[24,100] 1.02us |-----------------------------------------L0.999-----------------------------------------|"
- "L0.1000[24,100] 1.02us |----------------------------------------L0.1000-----------------------------------------|"
- - "**** Simulation run 0, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[75]). 200 Input Files, 150mb total:"
- - "L0, all files 768kb "
- - "L0.1[24,100] 21ns |------------------------------------------L0.1------------------------------------------|"
- - "L0.2[24,100] 22ns |------------------------------------------L0.2------------------------------------------|"
- - "L0.3[24,100] 23ns |------------------------------------------L0.3------------------------------------------|"
- - "L0.4[24,100] 24ns |------------------------------------------L0.4------------------------------------------|"
- - "L0.5[24,100] 25ns |------------------------------------------L0.5------------------------------------------|"
- - "L0.6[24,100] 26ns |------------------------------------------L0.6------------------------------------------|"
- - "L0.7[24,100] 27ns |------------------------------------------L0.7------------------------------------------|"
- - "L0.8[24,100] 28ns |------------------------------------------L0.8------------------------------------------|"
- - "L0.9[24,100] 29ns |------------------------------------------L0.9------------------------------------------|"
- - "L0.10[24,100] 30ns |-----------------------------------------L0.10------------------------------------------|"
- - "L0.11[24,100] 31ns |-----------------------------------------L0.11------------------------------------------|"
- - "L0.12[24,100] 32ns |-----------------------------------------L0.12------------------------------------------|"
- - "L0.13[24,100] 33ns |-----------------------------------------L0.13------------------------------------------|"
- - "L0.14[24,100] 34ns |-----------------------------------------L0.14------------------------------------------|"
- - "L0.15[24,100] 35ns |-----------------------------------------L0.15------------------------------------------|"
- - "L0.16[24,100] 36ns |-----------------------------------------L0.16------------------------------------------|"
- - "L0.17[24,100] 37ns |-----------------------------------------L0.17------------------------------------------|"
- - "L0.18[24,100] 38ns |-----------------------------------------L0.18------------------------------------------|"
- - "L0.19[24,100] 39ns |-----------------------------------------L0.19------------------------------------------|"
- - "L0.20[24,100] 40ns |-----------------------------------------L0.20------------------------------------------|"
- - "L0.21[24,100] 41ns |-----------------------------------------L0.21------------------------------------------|"
- - "L0.22[24,100] 42ns |-----------------------------------------L0.22------------------------------------------|"
- - "L0.23[24,100] 43ns |-----------------------------------------L0.23------------------------------------------|"
- - "L0.24[24,100] 44ns |-----------------------------------------L0.24------------------------------------------|"
- - "L0.25[24,100] 45ns |-----------------------------------------L0.25------------------------------------------|"
- - "L0.26[24,100] 46ns |-----------------------------------------L0.26------------------------------------------|"
- - "L0.27[24,100] 47ns |-----------------------------------------L0.27------------------------------------------|"
- - "L0.28[24,100] 48ns |-----------------------------------------L0.28------------------------------------------|"
- - "L0.29[24,100] 49ns |-----------------------------------------L0.29------------------------------------------|"
- - "L0.30[24,100] 50ns |-----------------------------------------L0.30------------------------------------------|"
- - "L0.31[24,100] 51ns |-----------------------------------------L0.31------------------------------------------|"
- - "L0.32[24,100] 52ns |-----------------------------------------L0.32------------------------------------------|"
- - "L0.33[24,100] 53ns |-----------------------------------------L0.33------------------------------------------|"
- - "L0.34[24,100] 54ns |-----------------------------------------L0.34------------------------------------------|"
- - "L0.35[24,100] 55ns |-----------------------------------------L0.35------------------------------------------|"
- - "L0.36[24,100] 56ns |-----------------------------------------L0.36------------------------------------------|"
- - "L0.37[24,100] 57ns |-----------------------------------------L0.37------------------------------------------|"
- - "L0.38[24,100] 58ns |-----------------------------------------L0.38------------------------------------------|"
- - "L0.39[24,100] 59ns |-----------------------------------------L0.39------------------------------------------|"
- - "L0.40[24,100] 60ns |-----------------------------------------L0.40------------------------------------------|"
- - "L0.41[24,100] 61ns |-----------------------------------------L0.41------------------------------------------|"
- - "L0.42[24,100] 62ns |-----------------------------------------L0.42------------------------------------------|"
- - "L0.43[24,100] 63ns |-----------------------------------------L0.43------------------------------------------|"
- - "L0.44[24,100] 64ns |-----------------------------------------L0.44------------------------------------------|"
- - "L0.45[24,100] 65ns |-----------------------------------------L0.45------------------------------------------|"
- - "L0.46[24,100] 66ns |-----------------------------------------L0.46------------------------------------------|"
- - "L0.47[24,100] 67ns |-----------------------------------------L0.47------------------------------------------|"
- - "L0.48[24,100] 68ns |-----------------------------------------L0.48------------------------------------------|"
- - "L0.49[24,100] 69ns |-----------------------------------------L0.49------------------------------------------|"
- - "L0.50[24,100] 70ns |-----------------------------------------L0.50------------------------------------------|"
- - "L0.51[24,100] 71ns |-----------------------------------------L0.51------------------------------------------|"
- - "L0.52[24,100] 72ns |-----------------------------------------L0.52------------------------------------------|"
- - "L0.53[24,100] 73ns |-----------------------------------------L0.53------------------------------------------|"
- - "L0.54[24,100] 74ns |-----------------------------------------L0.54------------------------------------------|"
- - "L0.55[24,100] 75ns |-----------------------------------------L0.55------------------------------------------|"
- - "L0.56[24,100] 76ns |-----------------------------------------L0.56------------------------------------------|"
- - "L0.57[24,100] 77ns |-----------------------------------------L0.57------------------------------------------|"
- - "L0.58[24,100] 78ns |-----------------------------------------L0.58------------------------------------------|"
- - "L0.59[24,100] 79ns |-----------------------------------------L0.59------------------------------------------|"
- - "L0.60[24,100] 80ns |-----------------------------------------L0.60------------------------------------------|"
- - "L0.61[24,100] 81ns |-----------------------------------------L0.61------------------------------------------|"
- - "L0.62[24,100] 82ns |-----------------------------------------L0.62------------------------------------------|"
- - "L0.63[24,100] 83ns |-----------------------------------------L0.63------------------------------------------|"
- - "L0.64[24,100] 84ns |-----------------------------------------L0.64------------------------------------------|"
- - "L0.65[24,100] 85ns |-----------------------------------------L0.65------------------------------------------|"
- - "L0.66[24,100] 86ns |-----------------------------------------L0.66------------------------------------------|"
- - "L0.67[24,100] 87ns |-----------------------------------------L0.67------------------------------------------|"
- - "L0.68[24,100] 88ns |-----------------------------------------L0.68------------------------------------------|"
- - "L0.69[24,100] 89ns |-----------------------------------------L0.69------------------------------------------|"
- - "L0.70[24,100] 90ns |-----------------------------------------L0.70------------------------------------------|"
- - "L0.71[24,100] 91ns |-----------------------------------------L0.71------------------------------------------|"
- - "L0.72[24,100] 92ns |-----------------------------------------L0.72------------------------------------------|"
- - "L0.73[24,100] 93ns |-----------------------------------------L0.73------------------------------------------|"
- - "L0.74[24,100] 94ns |-----------------------------------------L0.74------------------------------------------|"
- - "L0.75[24,100] 95ns |-----------------------------------------L0.75------------------------------------------|"
- - "L0.76[24,100] 96ns |-----------------------------------------L0.76------------------------------------------|"
- - "L0.77[24,100] 97ns |-----------------------------------------L0.77------------------------------------------|"
- - "L0.78[24,100] 98ns |-----------------------------------------L0.78------------------------------------------|"
- - "L0.79[24,100] 99ns |-----------------------------------------L0.79------------------------------------------|"
- - "L0.80[24,100] 100ns |-----------------------------------------L0.80------------------------------------------|"
- - "L0.81[24,100] 101ns |-----------------------------------------L0.81------------------------------------------|"
- - "L0.82[24,100] 102ns |-----------------------------------------L0.82------------------------------------------|"
- - "L0.83[24,100] 103ns |-----------------------------------------L0.83------------------------------------------|"
- - "L0.84[24,100] 104ns |-----------------------------------------L0.84------------------------------------------|"
- - "L0.85[24,100] 105ns |-----------------------------------------L0.85------------------------------------------|"
- - "L0.86[24,100] 106ns |-----------------------------------------L0.86------------------------------------------|"
- - "L0.87[24,100] 107ns |-----------------------------------------L0.87------------------------------------------|"
- - "L0.88[24,100] 108ns |-----------------------------------------L0.88------------------------------------------|"
- - "L0.89[24,100] 109ns |-----------------------------------------L0.89------------------------------------------|"
- - "L0.90[24,100] 110ns |-----------------------------------------L0.90------------------------------------------|"
- - "L0.91[24,100] 111ns |-----------------------------------------L0.91------------------------------------------|"
- - "L0.92[24,100] 112ns |-----------------------------------------L0.92------------------------------------------|"
- - "L0.93[24,100] 113ns |-----------------------------------------L0.93------------------------------------------|"
- - "L0.94[24,100] 114ns |-----------------------------------------L0.94------------------------------------------|"
- - "L0.95[24,100] 115ns |-----------------------------------------L0.95------------------------------------------|"
- - "L0.96[24,100] 116ns |-----------------------------------------L0.96------------------------------------------|"
- - "L0.97[24,100] 117ns |-----------------------------------------L0.97------------------------------------------|"
- - "L0.98[24,100] 118ns |-----------------------------------------L0.98------------------------------------------|"
- - "L0.99[24,100] 119ns |-----------------------------------------L0.99------------------------------------------|"
- - "L0.100[24,100] 120ns |-----------------------------------------L0.100-----------------------------------------|"
- - "L0.101[24,100] 121ns |-----------------------------------------L0.101-----------------------------------------|"
- - "L0.102[24,100] 122ns |-----------------------------------------L0.102-----------------------------------------|"
- - "L0.103[24,100] 123ns |-----------------------------------------L0.103-----------------------------------------|"
- - "L0.104[24,100] 124ns |-----------------------------------------L0.104-----------------------------------------|"
- - "L0.105[24,100] 125ns |-----------------------------------------L0.105-----------------------------------------|"
- - "L0.106[24,100] 126ns |-----------------------------------------L0.106-----------------------------------------|"
- - "L0.107[24,100] 127ns |-----------------------------------------L0.107-----------------------------------------|"
- - "L0.108[24,100] 128ns |-----------------------------------------L0.108-----------------------------------------|"
- - "L0.109[24,100] 129ns |-----------------------------------------L0.109-----------------------------------------|"
- - "L0.110[24,100] 130ns |-----------------------------------------L0.110-----------------------------------------|"
- - "L0.111[24,100] 131ns |-----------------------------------------L0.111-----------------------------------------|"
- - "L0.112[24,100] 132ns |-----------------------------------------L0.112-----------------------------------------|"
- - "L0.113[24,100] 133ns |-----------------------------------------L0.113-----------------------------------------|"
- - "L0.114[24,100] 134ns |-----------------------------------------L0.114-----------------------------------------|"
- - "L0.115[24,100] 135ns |-----------------------------------------L0.115-----------------------------------------|"
- - "L0.116[24,100] 136ns |-----------------------------------------L0.116-----------------------------------------|"
- - "L0.117[24,100] 137ns |-----------------------------------------L0.117-----------------------------------------|"
- - "L0.118[24,100] 138ns |-----------------------------------------L0.118-----------------------------------------|"
- - "L0.119[24,100] 139ns |-----------------------------------------L0.119-----------------------------------------|"
- - "L0.120[24,100] 140ns |-----------------------------------------L0.120-----------------------------------------|"
- - "L0.121[24,100] 141ns |-----------------------------------------L0.121-----------------------------------------|"
- - "L0.122[24,100] 142ns |-----------------------------------------L0.122-----------------------------------------|"
- - "L0.123[24,100] 143ns |-----------------------------------------L0.123-----------------------------------------|"
- - "L0.124[24,100] 144ns |-----------------------------------------L0.124-----------------------------------------|"
- - "L0.125[24,100] 145ns |-----------------------------------------L0.125-----------------------------------------|"
- - "L0.126[24,100] 146ns |-----------------------------------------L0.126-----------------------------------------|"
- - "L0.127[24,100] 147ns |-----------------------------------------L0.127-----------------------------------------|"
- - "L0.128[24,100] 148ns |-----------------------------------------L0.128-----------------------------------------|"
- - "L0.129[24,100] 149ns |-----------------------------------------L0.129-----------------------------------------|"
- - "L0.130[24,100] 150ns |-----------------------------------------L0.130-----------------------------------------|"
- - "L0.131[24,100] 151ns |-----------------------------------------L0.131-----------------------------------------|"
- - "L0.132[24,100] 152ns |-----------------------------------------L0.132-----------------------------------------|"
- - "L0.133[24,100] 153ns |-----------------------------------------L0.133-----------------------------------------|"
- - "L0.134[24,100] 154ns |-----------------------------------------L0.134-----------------------------------------|"
- - "L0.135[24,100] 155ns |-----------------------------------------L0.135-----------------------------------------|"
- - "L0.136[24,100] 156ns |-----------------------------------------L0.136-----------------------------------------|"
- - "L0.137[24,100] 157ns |-----------------------------------------L0.137-----------------------------------------|"
- - "L0.138[24,100] 158ns |-----------------------------------------L0.138-----------------------------------------|"
- - "L0.139[24,100] 159ns |-----------------------------------------L0.139-----------------------------------------|"
- - "L0.140[24,100] 160ns |-----------------------------------------L0.140-----------------------------------------|"
- - "L0.141[24,100] 161ns |-----------------------------------------L0.141-----------------------------------------|"
- - "L0.142[24,100] 162ns |-----------------------------------------L0.142-----------------------------------------|"
- - "L0.143[24,100] 163ns |-----------------------------------------L0.143-----------------------------------------|"
- - "L0.144[24,100] 164ns |-----------------------------------------L0.144-----------------------------------------|"
- - "L0.145[24,100] 165ns |-----------------------------------------L0.145-----------------------------------------|"
- - "L0.146[24,100] 166ns |-----------------------------------------L0.146-----------------------------------------|"
- - "L0.147[24,100] 167ns |-----------------------------------------L0.147-----------------------------------------|"
- - "L0.148[24,100] 168ns |-----------------------------------------L0.148-----------------------------------------|"
- - "L0.149[24,100] 169ns |-----------------------------------------L0.149-----------------------------------------|"
- - "L0.150[24,100] 170ns |-----------------------------------------L0.150-----------------------------------------|"
- - "L0.151[24,100] 171ns |-----------------------------------------L0.151-----------------------------------------|"
- - "L0.152[24,100] 172ns |-----------------------------------------L0.152-----------------------------------------|"
- - "L0.153[24,100] 173ns |-----------------------------------------L0.153-----------------------------------------|"
- - "L0.154[24,100] 174ns |-----------------------------------------L0.154-----------------------------------------|"
- - "L0.155[24,100] 175ns |-----------------------------------------L0.155-----------------------------------------|"
- - "L0.156[24,100] 176ns |-----------------------------------------L0.156-----------------------------------------|"
- - "L0.157[24,100] 177ns |-----------------------------------------L0.157-----------------------------------------|"
- - "L0.158[24,100] 178ns |-----------------------------------------L0.158-----------------------------------------|"
- - "L0.159[24,100] 179ns |-----------------------------------------L0.159-----------------------------------------|"
- - "L0.160[24,100] 180ns |-----------------------------------------L0.160-----------------------------------------|"
- - "L0.161[24,100] 181ns |-----------------------------------------L0.161-----------------------------------------|"
- - "L0.162[24,100] 182ns |-----------------------------------------L0.162-----------------------------------------|"
- - "L0.163[24,100] 183ns |-----------------------------------------L0.163-----------------------------------------|"
- - "L0.164[24,100] 184ns |-----------------------------------------L0.164-----------------------------------------|"
- - "L0.165[24,100] 185ns |-----------------------------------------L0.165-----------------------------------------|"
- - "L0.166[24,100] 186ns |-----------------------------------------L0.166-----------------------------------------|"
- - "L0.167[24,100] 187ns |-----------------------------------------L0.167-----------------------------------------|"
- - "L0.168[24,100] 188ns |-----------------------------------------L0.168-----------------------------------------|"
- - "L0.169[24,100] 189ns |-----------------------------------------L0.169-----------------------------------------|"
- - "L0.170[24,100] 190ns |-----------------------------------------L0.170-----------------------------------------|"
- - "L0.171[24,100] 191ns |-----------------------------------------L0.171-----------------------------------------|"
- - "L0.172[24,100] 192ns |-----------------------------------------L0.172-----------------------------------------|"
- - "L0.173[24,100] 193ns |-----------------------------------------L0.173-----------------------------------------|"
- - "L0.174[24,100] 194ns |-----------------------------------------L0.174-----------------------------------------|"
- - "L0.175[24,100] 195ns |-----------------------------------------L0.175-----------------------------------------|"
- - "L0.176[24,100] 196ns |-----------------------------------------L0.176-----------------------------------------|"
- - "L0.177[24,100] 197ns |-----------------------------------------L0.177-----------------------------------------|"
- - "L0.178[24,100] 198ns |-----------------------------------------L0.178-----------------------------------------|"
- - "L0.179[24,100] 199ns |-----------------------------------------L0.179-----------------------------------------|"
- - "L0.180[24,100] 200ns |-----------------------------------------L0.180-----------------------------------------|"
- - "L0.181[24,100] 201ns |-----------------------------------------L0.181-----------------------------------------|"
- - "L0.182[24,100] 202ns |-----------------------------------------L0.182-----------------------------------------|"
- - "L0.183[24,100] 203ns |-----------------------------------------L0.183-----------------------------------------|"
- - "L0.184[24,100] 204ns |-----------------------------------------L0.184-----------------------------------------|"
- - "L0.185[24,100] 205ns |-----------------------------------------L0.185-----------------------------------------|"
- - "L0.186[24,100] 206ns |-----------------------------------------L0.186-----------------------------------------|"
- - "L0.187[24,100] 207ns |-----------------------------------------L0.187-----------------------------------------|"
- - "L0.188[24,100] 208ns |-----------------------------------------L0.188-----------------------------------------|"
- - "L0.189[24,100] 209ns |-----------------------------------------L0.189-----------------------------------------|"
- - "L0.190[24,100] 210ns |-----------------------------------------L0.190-----------------------------------------|"
- - "L0.191[24,100] 211ns |-----------------------------------------L0.191-----------------------------------------|"
- - "L0.192[24,100] 212ns |-----------------------------------------L0.192-----------------------------------------|"
- - "L0.193[24,100] 213ns |-----------------------------------------L0.193-----------------------------------------|"
- - "L0.194[24,100] 214ns |-----------------------------------------L0.194-----------------------------------------|"
- - "L0.195[24,100] 215ns |-----------------------------------------L0.195-----------------------------------------|"
- - "L0.196[24,100] 216ns |-----------------------------------------L0.196-----------------------------------------|"
- - "L0.197[24,100] 217ns |-----------------------------------------L0.197-----------------------------------------|"
- - "L0.198[24,100] 218ns |-----------------------------------------L0.198-----------------------------------------|"
- - "L0.199[24,100] 219ns |-----------------------------------------L0.199-----------------------------------------|"
- - "L0.200[24,100] 220ns |-----------------------------------------L0.200-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 150mb total:"
- - "L0 "
- - "L0.?[24,75] 220ns 101mb |---------------------------L0.?---------------------------| "
- - "L0.?[76,100] 220ns 49mb |-----------L0.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 200 files: L0.1, L0.2, L0.3, L0.4, L0.5, L0.6, L0.7, L0.8, L0.9, L0.10, L0.11, L0.12, L0.13, L0.14, L0.15, L0.16, L0.17, L0.18, L0.19, L0.20, L0.21, L0.22, L0.23, L0.24, L0.25, L0.26, L0.27, L0.28, L0.29, L0.30, L0.31, L0.32, L0.33, L0.34, L0.35, L0.36, L0.37, L0.38, L0.39, L0.40, L0.41, L0.42, L0.43, L0.44, L0.45, L0.46, L0.47, L0.48, L0.49, L0.50, L0.51, L0.52, L0.53, L0.54, L0.55, L0.56, L0.57, L0.58, L0.59, L0.60, L0.61, L0.62, L0.63, L0.64, L0.65, L0.66, L0.67, L0.68, L0.69, L0.70, L0.71, L0.72, L0.73, L0.74, L0.75, L0.76, L0.77, L0.78, L0.79, L0.80, L0.81, L0.82, L0.83, L0.84, L0.85, L0.86, L0.87, L0.88, L0.89, L0.90, L0.91, L0.92, L0.93, L0.94, L0.95, L0.96, L0.97, L0.98, L0.99, L0.100, L0.101, L0.102, L0.103, L0.104, L0.105, L0.106, L0.107, L0.108, L0.109, L0.110, L0.111, L0.112, L0.113, L0.114, L0.115, L0.116, L0.117, L0.118, L0.119, L0.120, L0.121, L0.122, L0.123, L0.124, L0.125, L0.126, L0.127, L0.128, L0.129, L0.130, L0.131, L0.132, L0.133, L0.134, L0.135, L0.136, L0.137, L0.138, L0.139, L0.140, L0.141, L0.142, L0.143, L0.144, L0.145, L0.146, L0.147, L0.148, L0.149, L0.150, L0.151, L0.152, L0.153, L0.154, L0.155, L0.156, L0.157, L0.158, L0.159, L0.160, L0.161, L0.162, L0.163, L0.164, L0.165, L0.166, L0.167, L0.168, L0.169, L0.170, L0.171, L0.172, L0.173, L0.174, L0.175, L0.176, L0.177, L0.178, L0.179, L0.180, L0.181, L0.182, L0.183, L0.184, L0.185, L0.186, L0.187, L0.188, L0.189, L0.190, L0.191, L0.192, L0.193, L0.194, L0.195, L0.196, L0.197, L0.198, L0.199, L0.200"
- - " Creating 2 files"
- - "**** Simulation run 1, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[50, 76]). 200 Input Files, 299mb total:"
- - "L0 "
- - "L0.1001[24,75] 220ns 101mb|-------------------------L0.1001--------------------------| "
- - "L0.1002[76,100] 220ns 49mb |---------L0.1002----------| "
- - "L0.201[24,100] 221ns 768kb|-----------------------------------------L0.201-----------------------------------------|"
- - "L0.202[24,100] 222ns 768kb|-----------------------------------------L0.202-----------------------------------------|"
- - "L0.203[24,100] 223ns 768kb|-----------------------------------------L0.203-----------------------------------------|"
- - "L0.204[24,100] 224ns 768kb|-----------------------------------------L0.204-----------------------------------------|"
- - "L0.205[24,100] 225ns 768kb|-----------------------------------------L0.205-----------------------------------------|"
- - "L0.206[24,100] 226ns 768kb|-----------------------------------------L0.206-----------------------------------------|"
- - "L0.207[24,100] 227ns 768kb|-----------------------------------------L0.207-----------------------------------------|"
- - "L0.208[24,100] 228ns 768kb|-----------------------------------------L0.208-----------------------------------------|"
- - "L0.209[24,100] 229ns 768kb|-----------------------------------------L0.209-----------------------------------------|"
- - "L0.210[24,100] 230ns 768kb|-----------------------------------------L0.210-----------------------------------------|"
- - "L0.211[24,100] 231ns 768kb|-----------------------------------------L0.211-----------------------------------------|"
- - "L0.212[24,100] 232ns 768kb|-----------------------------------------L0.212-----------------------------------------|"
- - "L0.213[24,100] 233ns 768kb|-----------------------------------------L0.213-----------------------------------------|"
- - "L0.214[24,100] 234ns 768kb|-----------------------------------------L0.214-----------------------------------------|"
- - "L0.215[24,100] 235ns 768kb|-----------------------------------------L0.215-----------------------------------------|"
- - "L0.216[24,100] 236ns 768kb|-----------------------------------------L0.216-----------------------------------------|"
- - "L0.217[24,100] 237ns 768kb|-----------------------------------------L0.217-----------------------------------------|"
- - "L0.218[24,100] 238ns 768kb|-----------------------------------------L0.218-----------------------------------------|"
- - "L0.219[24,100] 239ns 768kb|-----------------------------------------L0.219-----------------------------------------|"
- - "L0.220[24,100] 240ns 768kb|-----------------------------------------L0.220-----------------------------------------|"
- - "L0.221[24,100] 241ns 768kb|-----------------------------------------L0.221-----------------------------------------|"
- - "L0.222[24,100] 242ns 768kb|-----------------------------------------L0.222-----------------------------------------|"
- - "L0.223[24,100] 243ns 768kb|-----------------------------------------L0.223-----------------------------------------|"
- - "L0.224[24,100] 244ns 768kb|-----------------------------------------L0.224-----------------------------------------|"
- - "L0.225[24,100] 245ns 768kb|-----------------------------------------L0.225-----------------------------------------|"
- - "L0.226[24,100] 246ns 768kb|-----------------------------------------L0.226-----------------------------------------|"
- - "L0.227[24,100] 247ns 768kb|-----------------------------------------L0.227-----------------------------------------|"
- - "L0.228[24,100] 248ns 768kb|-----------------------------------------L0.228-----------------------------------------|"
- - "L0.229[24,100] 249ns 768kb|-----------------------------------------L0.229-----------------------------------------|"
- - "L0.230[24,100] 250ns 768kb|-----------------------------------------L0.230-----------------------------------------|"
- - "L0.231[24,100] 251ns 768kb|-----------------------------------------L0.231-----------------------------------------|"
- - "L0.232[24,100] 252ns 768kb|-----------------------------------------L0.232-----------------------------------------|"
- - "L0.233[24,100] 253ns 768kb|-----------------------------------------L0.233-----------------------------------------|"
- - "L0.234[24,100] 254ns 768kb|-----------------------------------------L0.234-----------------------------------------|"
- - "L0.235[24,100] 255ns 768kb|-----------------------------------------L0.235-----------------------------------------|"
- - "L0.236[24,100] 256ns 768kb|-----------------------------------------L0.236-----------------------------------------|"
- - "L0.237[24,100] 257ns 768kb|-----------------------------------------L0.237-----------------------------------------|"
- - "L0.238[24,100] 258ns 768kb|-----------------------------------------L0.238-----------------------------------------|"
- - "L0.239[24,100] 259ns 768kb|-----------------------------------------L0.239-----------------------------------------|"
- - "L0.240[24,100] 260ns 768kb|-----------------------------------------L0.240-----------------------------------------|"
- - "L0.241[24,100] 261ns 768kb|-----------------------------------------L0.241-----------------------------------------|"
- - "L0.242[24,100] 262ns 768kb|-----------------------------------------L0.242-----------------------------------------|"
- - "L0.243[24,100] 263ns 768kb|-----------------------------------------L0.243-----------------------------------------|"
- - "L0.244[24,100] 264ns 768kb|-----------------------------------------L0.244-----------------------------------------|"
- - "L0.245[24,100] 265ns 768kb|-----------------------------------------L0.245-----------------------------------------|"
- - "L0.246[24,100] 266ns 768kb|-----------------------------------------L0.246-----------------------------------------|"
- - "L0.247[24,100] 267ns 768kb|-----------------------------------------L0.247-----------------------------------------|"
- - "L0.248[24,100] 268ns 768kb|-----------------------------------------L0.248-----------------------------------------|"
- - "L0.249[24,100] 269ns 768kb|-----------------------------------------L0.249-----------------------------------------|"
- - "L0.250[24,100] 270ns 768kb|-----------------------------------------L0.250-----------------------------------------|"
- - "L0.251[24,100] 271ns 768kb|-----------------------------------------L0.251-----------------------------------------|"
- - "L0.252[24,100] 272ns 768kb|-----------------------------------------L0.252-----------------------------------------|"
- - "L0.253[24,100] 273ns 768kb|-----------------------------------------L0.253-----------------------------------------|"
- - "L0.254[24,100] 274ns 768kb|-----------------------------------------L0.254-----------------------------------------|"
- - "L0.255[24,100] 275ns 768kb|-----------------------------------------L0.255-----------------------------------------|"
- - "L0.256[24,100] 276ns 768kb|-----------------------------------------L0.256-----------------------------------------|"
- - "L0.257[24,100] 277ns 768kb|-----------------------------------------L0.257-----------------------------------------|"
- - "L0.258[24,100] 278ns 768kb|-----------------------------------------L0.258-----------------------------------------|"
- - "L0.259[24,100] 279ns 768kb|-----------------------------------------L0.259-----------------------------------------|"
- - "L0.260[24,100] 280ns 768kb|-----------------------------------------L0.260-----------------------------------------|"
- - "L0.261[24,100] 281ns 768kb|-----------------------------------------L0.261-----------------------------------------|"
- - "L0.262[24,100] 282ns 768kb|-----------------------------------------L0.262-----------------------------------------|"
- - "L0.263[24,100] 283ns 768kb|-----------------------------------------L0.263-----------------------------------------|"
- - "L0.264[24,100] 284ns 768kb|-----------------------------------------L0.264-----------------------------------------|"
- - "L0.265[24,100] 285ns 768kb|-----------------------------------------L0.265-----------------------------------------|"
- - "L0.266[24,100] 286ns 768kb|-----------------------------------------L0.266-----------------------------------------|"
- - "L0.267[24,100] 287ns 768kb|-----------------------------------------L0.267-----------------------------------------|"
- - "L0.268[24,100] 288ns 768kb|-----------------------------------------L0.268-----------------------------------------|"
- - "L0.269[24,100] 289ns 768kb|-----------------------------------------L0.269-----------------------------------------|"
- - "L0.270[24,100] 290ns 768kb|-----------------------------------------L0.270-----------------------------------------|"
- - "L0.271[24,100] 291ns 768kb|-----------------------------------------L0.271-----------------------------------------|"
- - "L0.272[24,100] 292ns 768kb|-----------------------------------------L0.272-----------------------------------------|"
- - "L0.273[24,100] 293ns 768kb|-----------------------------------------L0.273-----------------------------------------|"
- - "L0.274[24,100] 294ns 768kb|-----------------------------------------L0.274-----------------------------------------|"
- - "L0.275[24,100] 295ns 768kb|-----------------------------------------L0.275-----------------------------------------|"
- - "L0.276[24,100] 296ns 768kb|-----------------------------------------L0.276-----------------------------------------|"
- - "L0.277[24,100] 297ns 768kb|-----------------------------------------L0.277-----------------------------------------|"
- - "L0.278[24,100] 298ns 768kb|-----------------------------------------L0.278-----------------------------------------|"
- - "L0.279[24,100] 299ns 768kb|-----------------------------------------L0.279-----------------------------------------|"
- - "L0.280[24,100] 300ns 768kb|-----------------------------------------L0.280-----------------------------------------|"
- - "L0.281[24,100] 301ns 768kb|-----------------------------------------L0.281-----------------------------------------|"
- - "L0.282[24,100] 302ns 768kb|-----------------------------------------L0.282-----------------------------------------|"
- - "L0.283[24,100] 303ns 768kb|-----------------------------------------L0.283-----------------------------------------|"
- - "L0.284[24,100] 304ns 768kb|-----------------------------------------L0.284-----------------------------------------|"
- - "L0.285[24,100] 305ns 768kb|-----------------------------------------L0.285-----------------------------------------|"
- - "L0.286[24,100] 306ns 768kb|-----------------------------------------L0.286-----------------------------------------|"
- - "L0.287[24,100] 307ns 768kb|-----------------------------------------L0.287-----------------------------------------|"
- - "L0.288[24,100] 308ns 768kb|-----------------------------------------L0.288-----------------------------------------|"
- - "L0.289[24,100] 309ns 768kb|-----------------------------------------L0.289-----------------------------------------|"
- - "L0.290[24,100] 310ns 768kb|-----------------------------------------L0.290-----------------------------------------|"
- - "L0.291[24,100] 311ns 768kb|-----------------------------------------L0.291-----------------------------------------|"
- - "L0.292[24,100] 312ns 768kb|-----------------------------------------L0.292-----------------------------------------|"
- - "L0.293[24,100] 313ns 768kb|-----------------------------------------L0.293-----------------------------------------|"
- - "L0.294[24,100] 314ns 768kb|-----------------------------------------L0.294-----------------------------------------|"
- - "L0.295[24,100] 315ns 768kb|-----------------------------------------L0.295-----------------------------------------|"
- - "L0.296[24,100] 316ns 768kb|-----------------------------------------L0.296-----------------------------------------|"
- - "L0.297[24,100] 317ns 768kb|-----------------------------------------L0.297-----------------------------------------|"
- - "L0.298[24,100] 318ns 768kb|-----------------------------------------L0.298-----------------------------------------|"
- - "L0.299[24,100] 319ns 768kb|-----------------------------------------L0.299-----------------------------------------|"
- - "L0.300[24,100] 320ns 768kb|-----------------------------------------L0.300-----------------------------------------|"
- - "L0.301[24,100] 321ns 768kb|-----------------------------------------L0.301-----------------------------------------|"
- - "L0.302[24,100] 322ns 768kb|-----------------------------------------L0.302-----------------------------------------|"
- - "L0.303[24,100] 323ns 768kb|-----------------------------------------L0.303-----------------------------------------|"
- - "L0.304[24,100] 324ns 768kb|-----------------------------------------L0.304-----------------------------------------|"
- - "L0.305[24,100] 325ns 768kb|-----------------------------------------L0.305-----------------------------------------|"
- - "L0.306[24,100] 326ns 768kb|-----------------------------------------L0.306-----------------------------------------|"
- - "L0.307[24,100] 327ns 768kb|-----------------------------------------L0.307-----------------------------------------|"
- - "L0.308[24,100] 328ns 768kb|-----------------------------------------L0.308-----------------------------------------|"
- - "L0.309[24,100] 329ns 768kb|-----------------------------------------L0.309-----------------------------------------|"
- - "L0.310[24,100] 330ns 768kb|-----------------------------------------L0.310-----------------------------------------|"
- - "L0.311[24,100] 331ns 768kb|-----------------------------------------L0.311-----------------------------------------|"
- - "L0.312[24,100] 332ns 768kb|-----------------------------------------L0.312-----------------------------------------|"
- - "L0.313[24,100] 333ns 768kb|-----------------------------------------L0.313-----------------------------------------|"
- - "L0.314[24,100] 334ns 768kb|-----------------------------------------L0.314-----------------------------------------|"
- - "L0.315[24,100] 335ns 768kb|-----------------------------------------L0.315-----------------------------------------|"
- - "L0.316[24,100] 336ns 768kb|-----------------------------------------L0.316-----------------------------------------|"
- - "L0.317[24,100] 337ns 768kb|-----------------------------------------L0.317-----------------------------------------|"
- - "L0.318[24,100] 338ns 768kb|-----------------------------------------L0.318-----------------------------------------|"
- - "L0.319[24,100] 339ns 768kb|-----------------------------------------L0.319-----------------------------------------|"
- - "L0.320[24,100] 340ns 768kb|-----------------------------------------L0.320-----------------------------------------|"
- - "L0.321[24,100] 341ns 768kb|-----------------------------------------L0.321-----------------------------------------|"
- - "L0.322[24,100] 342ns 768kb|-----------------------------------------L0.322-----------------------------------------|"
- - "L0.323[24,100] 343ns 768kb|-----------------------------------------L0.323-----------------------------------------|"
- - "L0.324[24,100] 344ns 768kb|-----------------------------------------L0.324-----------------------------------------|"
- - "L0.325[24,100] 345ns 768kb|-----------------------------------------L0.325-----------------------------------------|"
- - "L0.326[24,100] 346ns 768kb|-----------------------------------------L0.326-----------------------------------------|"
- - "L0.327[24,100] 347ns 768kb|-----------------------------------------L0.327-----------------------------------------|"
- - "L0.328[24,100] 348ns 768kb|-----------------------------------------L0.328-----------------------------------------|"
- - "L0.329[24,100] 349ns 768kb|-----------------------------------------L0.329-----------------------------------------|"
- - "L0.330[24,100] 350ns 768kb|-----------------------------------------L0.330-----------------------------------------|"
- - "L0.331[24,100] 351ns 768kb|-----------------------------------------L0.331-----------------------------------------|"
- - "L0.332[24,100] 352ns 768kb|-----------------------------------------L0.332-----------------------------------------|"
- - "L0.333[24,100] 353ns 768kb|-----------------------------------------L0.333-----------------------------------------|"
- - "L0.334[24,100] 354ns 768kb|-----------------------------------------L0.334-----------------------------------------|"
- - "L0.335[24,100] 355ns 768kb|-----------------------------------------L0.335-----------------------------------------|"
- - "L0.336[24,100] 356ns 768kb|-----------------------------------------L0.336-----------------------------------------|"
- - "L0.337[24,100] 357ns 768kb|-----------------------------------------L0.337-----------------------------------------|"
- - "L0.338[24,100] 358ns 768kb|-----------------------------------------L0.338-----------------------------------------|"
- - "L0.339[24,100] 359ns 768kb|-----------------------------------------L0.339-----------------------------------------|"
- - "L0.340[24,100] 360ns 768kb|-----------------------------------------L0.340-----------------------------------------|"
- - "L0.341[24,100] 361ns 768kb|-----------------------------------------L0.341-----------------------------------------|"
- - "L0.342[24,100] 362ns 768kb|-----------------------------------------L0.342-----------------------------------------|"
- - "L0.343[24,100] 363ns 768kb|-----------------------------------------L0.343-----------------------------------------|"
- - "L0.344[24,100] 364ns 768kb|-----------------------------------------L0.344-----------------------------------------|"
- - "L0.345[24,100] 365ns 768kb|-----------------------------------------L0.345-----------------------------------------|"
- - "L0.346[24,100] 366ns 768kb|-----------------------------------------L0.346-----------------------------------------|"
- - "L0.347[24,100] 367ns 768kb|-----------------------------------------L0.347-----------------------------------------|"
- - "L0.348[24,100] 368ns 768kb|-----------------------------------------L0.348-----------------------------------------|"
- - "L0.349[24,100] 369ns 768kb|-----------------------------------------L0.349-----------------------------------------|"
- - "L0.350[24,100] 370ns 768kb|-----------------------------------------L0.350-----------------------------------------|"
- - "L0.351[24,100] 371ns 768kb|-----------------------------------------L0.351-----------------------------------------|"
- - "L0.352[24,100] 372ns 768kb|-----------------------------------------L0.352-----------------------------------------|"
- - "L0.353[24,100] 373ns 768kb|-----------------------------------------L0.353-----------------------------------------|"
- - "L0.354[24,100] 374ns 768kb|-----------------------------------------L0.354-----------------------------------------|"
- - "L0.355[24,100] 375ns 768kb|-----------------------------------------L0.355-----------------------------------------|"
- - "L0.356[24,100] 376ns 768kb|-----------------------------------------L0.356-----------------------------------------|"
- - "L0.357[24,100] 377ns 768kb|-----------------------------------------L0.357-----------------------------------------|"
- - "L0.358[24,100] 378ns 768kb|-----------------------------------------L0.358-----------------------------------------|"
- - "L0.359[24,100] 379ns 768kb|-----------------------------------------L0.359-----------------------------------------|"
- - "L0.360[24,100] 380ns 768kb|-----------------------------------------L0.360-----------------------------------------|"
- - "L0.361[24,100] 381ns 768kb|-----------------------------------------L0.361-----------------------------------------|"
- - "L0.362[24,100] 382ns 768kb|-----------------------------------------L0.362-----------------------------------------|"
- - "L0.363[24,100] 383ns 768kb|-----------------------------------------L0.363-----------------------------------------|"
- - "L0.364[24,100] 384ns 768kb|-----------------------------------------L0.364-----------------------------------------|"
- - "L0.365[24,100] 385ns 768kb|-----------------------------------------L0.365-----------------------------------------|"
- - "L0.366[24,100] 386ns 768kb|-----------------------------------------L0.366-----------------------------------------|"
- - "L0.367[24,100] 387ns 768kb|-----------------------------------------L0.367-----------------------------------------|"
- - "L0.368[24,100] 388ns 768kb|-----------------------------------------L0.368-----------------------------------------|"
- - "L0.369[24,100] 389ns 768kb|-----------------------------------------L0.369-----------------------------------------|"
- - "L0.370[24,100] 390ns 768kb|-----------------------------------------L0.370-----------------------------------------|"
- - "L0.371[24,100] 391ns 768kb|-----------------------------------------L0.371-----------------------------------------|"
- - "L0.372[24,100] 392ns 768kb|-----------------------------------------L0.372-----------------------------------------|"
- - "L0.373[24,100] 393ns 768kb|-----------------------------------------L0.373-----------------------------------------|"
- - "L0.374[24,100] 394ns 768kb|-----------------------------------------L0.374-----------------------------------------|"
- - "L0.375[24,100] 395ns 768kb|-----------------------------------------L0.375-----------------------------------------|"
- - "L0.376[24,100] 396ns 768kb|-----------------------------------------L0.376-----------------------------------------|"
- - "L0.377[24,100] 397ns 768kb|-----------------------------------------L0.377-----------------------------------------|"
- - "L0.378[24,100] 398ns 768kb|-----------------------------------------L0.378-----------------------------------------|"
- - "L0.379[24,100] 399ns 768kb|-----------------------------------------L0.379-----------------------------------------|"
- - "L0.380[24,100] 400ns 768kb|-----------------------------------------L0.380-----------------------------------------|"
- - "L0.381[24,100] 401ns 768kb|-----------------------------------------L0.381-----------------------------------------|"
- - "L0.382[24,100] 402ns 768kb|-----------------------------------------L0.382-----------------------------------------|"
- - "L0.383[24,100] 403ns 768kb|-----------------------------------------L0.383-----------------------------------------|"
- - "L0.384[24,100] 404ns 768kb|-----------------------------------------L0.384-----------------------------------------|"
- - "L0.385[24,100] 405ns 768kb|-----------------------------------------L0.385-----------------------------------------|"
- - "L0.386[24,100] 406ns 768kb|-----------------------------------------L0.386-----------------------------------------|"
- - "L0.387[24,100] 407ns 768kb|-----------------------------------------L0.387-----------------------------------------|"
- - "L0.388[24,100] 408ns 768kb|-----------------------------------------L0.388-----------------------------------------|"
- - "L0.389[24,100] 409ns 768kb|-----------------------------------------L0.389-----------------------------------------|"
- - "L0.390[24,100] 410ns 768kb|-----------------------------------------L0.390-----------------------------------------|"
- - "L0.391[24,100] 411ns 768kb|-----------------------------------------L0.391-----------------------------------------|"
- - "L0.392[24,100] 412ns 768kb|-----------------------------------------L0.392-----------------------------------------|"
- - "L0.393[24,100] 413ns 768kb|-----------------------------------------L0.393-----------------------------------------|"
- - "L0.394[24,100] 414ns 768kb|-----------------------------------------L0.394-----------------------------------------|"
- - "L0.395[24,100] 415ns 768kb|-----------------------------------------L0.395-----------------------------------------|"
- - "L0.396[24,100] 416ns 768kb|-----------------------------------------L0.396-----------------------------------------|"
- - "L0.397[24,100] 417ns 768kb|-----------------------------------------L0.397-----------------------------------------|"
- - "L0.398[24,100] 418ns 768kb|-----------------------------------------L0.398-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 299mb total:"
- - "L0 "
- - "L0.?[24,50] 418ns 102mb |------------L0.?------------| "
- - "L0.?[51,76] 418ns 98mb |-----------L0.?------------| "
- - "L0.?[77,100] 418ns 98mb |----------L0.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 200 files: L0.201, L0.202, L0.203, L0.204, L0.205, L0.206, L0.207, L0.208, L0.209, L0.210, L0.211, L0.212, L0.213, L0.214, L0.215, L0.216, L0.217, L0.218, L0.219, L0.220, L0.221, L0.222, L0.223, L0.224, L0.225, L0.226, L0.227, L0.228, L0.229, L0.230, L0.231, L0.232, L0.233, L0.234, L0.235, L0.236, L0.237, L0.238, L0.239, L0.240, L0.241, L0.242, L0.243, L0.244, L0.245, L0.246, L0.247, L0.248, L0.249, L0.250, L0.251, L0.252, L0.253, L0.254, L0.255, L0.256, L0.257, L0.258, L0.259, L0.260, L0.261, L0.262, L0.263, L0.264, L0.265, L0.266, L0.267, L0.268, L0.269, L0.270, L0.271, L0.272, L0.273, L0.274, L0.275, L0.276, L0.277, L0.278, L0.279, L0.280, L0.281, L0.282, L0.283, L0.284, L0.285, L0.286, L0.287, L0.288, L0.289, L0.290, L0.291, L0.292, L0.293, L0.294, L0.295, L0.296, L0.297, L0.298, L0.299, L0.300, L0.301, L0.302, L0.303, L0.304, L0.305, L0.306, L0.307, L0.308, L0.309, L0.310, L0.311, L0.312, L0.313, L0.314, L0.315, L0.316, L0.317, L0.318, L0.319, L0.320, L0.321, L0.322, L0.323, L0.324, L0.325, L0.326, L0.327, L0.328, L0.329, L0.330, L0.331, L0.332, L0.333, L0.334, L0.335, L0.336, L0.337, L0.338, L0.339, L0.340, L0.341, L0.342, L0.343, L0.344, L0.345, L0.346, L0.347, L0.348, L0.349, L0.350, L0.351, L0.352, L0.353, L0.354, L0.355, L0.356, L0.357, L0.358, L0.359, L0.360, L0.361, L0.362, L0.363, L0.364, L0.365, L0.366, L0.367, L0.368, L0.369, L0.370, L0.371, L0.372, L0.373, L0.374, L0.375, L0.376, L0.377, L0.378, L0.379, L0.380, L0.381, L0.382, L0.383, L0.384, L0.385, L0.386, L0.387, L0.388, L0.389, L0.390, L0.391, L0.392, L0.393, L0.394, L0.395, L0.396, L0.397, L0.398, L0.1001, L0.1002"
- - " Creating 3 files"
- - "**** Simulation run 2, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[42, 60, 78, 96]). 200 Input Files, 446mb total:"
- - "L0 "
- - "L0.1003[24,50] 418ns 102mb|----------L0.1003-----------| "
- - "L0.1004[51,76] 418ns 98mb |----------L0.1004----------| "
- - "L0.1005[77,100] 418ns 98mb |---------L0.1005---------| "
- - "L0.399[24,100] 419ns 768kb|-----------------------------------------L0.399-----------------------------------------|"
- - "L0.400[24,100] 420ns 768kb|-----------------------------------------L0.400-----------------------------------------|"
- - "L0.401[24,100] 421ns 768kb|-----------------------------------------L0.401-----------------------------------------|"
- - "L0.402[24,100] 422ns 768kb|-----------------------------------------L0.402-----------------------------------------|"
- - "L0.403[24,100] 423ns 768kb|-----------------------------------------L0.403-----------------------------------------|"
- - "L0.404[24,100] 424ns 768kb|-----------------------------------------L0.404-----------------------------------------|"
- - "L0.405[24,100] 425ns 768kb|-----------------------------------------L0.405-----------------------------------------|"
- - "L0.406[24,100] 426ns 768kb|-----------------------------------------L0.406-----------------------------------------|"
- - "L0.407[24,100] 427ns 768kb|-----------------------------------------L0.407-----------------------------------------|"
- - "L0.408[24,100] 428ns 768kb|-----------------------------------------L0.408-----------------------------------------|"
- - "L0.409[24,100] 429ns 768kb|-----------------------------------------L0.409-----------------------------------------|"
- - "L0.410[24,100] 430ns 768kb|-----------------------------------------L0.410-----------------------------------------|"
- - "L0.411[24,100] 431ns 768kb|-----------------------------------------L0.411-----------------------------------------|"
- - "L0.412[24,100] 432ns 768kb|-----------------------------------------L0.412-----------------------------------------|"
- - "L0.413[24,100] 433ns 768kb|-----------------------------------------L0.413-----------------------------------------|"
- - "L0.414[24,100] 434ns 768kb|-----------------------------------------L0.414-----------------------------------------|"
- - "L0.415[24,100] 435ns 768kb|-----------------------------------------L0.415-----------------------------------------|"
- - "L0.416[24,100] 436ns 768kb|-----------------------------------------L0.416-----------------------------------------|"
- - "L0.417[24,100] 437ns 768kb|-----------------------------------------L0.417-----------------------------------------|"
- - "L0.418[24,100] 438ns 768kb|-----------------------------------------L0.418-----------------------------------------|"
- - "L0.419[24,100] 439ns 768kb|-----------------------------------------L0.419-----------------------------------------|"
- - "L0.420[24,100] 440ns 768kb|-----------------------------------------L0.420-----------------------------------------|"
- - "L0.421[24,100] 441ns 768kb|-----------------------------------------L0.421-----------------------------------------|"
- - "L0.422[24,100] 442ns 768kb|-----------------------------------------L0.422-----------------------------------------|"
- - "L0.423[24,100] 443ns 768kb|-----------------------------------------L0.423-----------------------------------------|"
- - "L0.424[24,100] 444ns 768kb|-----------------------------------------L0.424-----------------------------------------|"
- - "L0.425[24,100] 445ns 768kb|-----------------------------------------L0.425-----------------------------------------|"
- - "L0.426[24,100] 446ns 768kb|-----------------------------------------L0.426-----------------------------------------|"
- - "L0.427[24,100] 447ns 768kb|-----------------------------------------L0.427-----------------------------------------|"
- - "L0.428[24,100] 448ns 768kb|-----------------------------------------L0.428-----------------------------------------|"
- - "L0.429[24,100] 449ns 768kb|-----------------------------------------L0.429-----------------------------------------|"
- - "L0.430[24,100] 450ns 768kb|-----------------------------------------L0.430-----------------------------------------|"
- - "L0.431[24,100] 451ns 768kb|-----------------------------------------L0.431-----------------------------------------|"
- - "L0.432[24,100] 452ns 768kb|-----------------------------------------L0.432-----------------------------------------|"
- - "L0.433[24,100] 453ns 768kb|-----------------------------------------L0.433-----------------------------------------|"
- - "L0.434[24,100] 454ns 768kb|-----------------------------------------L0.434-----------------------------------------|"
- - "L0.435[24,100] 455ns 768kb|-----------------------------------------L0.435-----------------------------------------|"
- - "L0.436[24,100] 456ns 768kb|-----------------------------------------L0.436-----------------------------------------|"
- - "L0.437[24,100] 457ns 768kb|-----------------------------------------L0.437-----------------------------------------|"
- - "L0.438[24,100] 458ns 768kb|-----------------------------------------L0.438-----------------------------------------|"
- - "L0.439[24,100] 459ns 768kb|-----------------------------------------L0.439-----------------------------------------|"
- - "L0.440[24,100] 460ns 768kb|-----------------------------------------L0.440-----------------------------------------|"
- - "L0.441[24,100] 461ns 768kb|-----------------------------------------L0.441-----------------------------------------|"
- - "L0.442[24,100] 462ns 768kb|-----------------------------------------L0.442-----------------------------------------|"
- - "L0.443[24,100] 463ns 768kb|-----------------------------------------L0.443-----------------------------------------|"
- - "L0.444[24,100] 464ns 768kb|-----------------------------------------L0.444-----------------------------------------|"
- - "L0.445[24,100] 465ns 768kb|-----------------------------------------L0.445-----------------------------------------|"
- - "L0.446[24,100] 466ns 768kb|-----------------------------------------L0.446-----------------------------------------|"
- - "L0.447[24,100] 467ns 768kb|-----------------------------------------L0.447-----------------------------------------|"
- - "L0.448[24,100] 468ns 768kb|-----------------------------------------L0.448-----------------------------------------|"
- - "L0.449[24,100] 469ns 768kb|-----------------------------------------L0.449-----------------------------------------|"
- - "L0.450[24,100] 470ns 768kb|-----------------------------------------L0.450-----------------------------------------|"
- - "L0.451[24,100] 471ns 768kb|-----------------------------------------L0.451-----------------------------------------|"
- - "L0.452[24,100] 472ns 768kb|-----------------------------------------L0.452-----------------------------------------|"
- - "L0.453[24,100] 473ns 768kb|-----------------------------------------L0.453-----------------------------------------|"
- - "L0.454[24,100] 474ns 768kb|-----------------------------------------L0.454-----------------------------------------|"
- - "L0.455[24,100] 475ns 768kb|-----------------------------------------L0.455-----------------------------------------|"
- - "L0.456[24,100] 476ns 768kb|-----------------------------------------L0.456-----------------------------------------|"
- - "L0.457[24,100] 477ns 768kb|-----------------------------------------L0.457-----------------------------------------|"
- - "L0.458[24,100] 478ns 768kb|-----------------------------------------L0.458-----------------------------------------|"
- - "L0.459[24,100] 479ns 768kb|-----------------------------------------L0.459-----------------------------------------|"
- - "L0.460[24,100] 480ns 768kb|-----------------------------------------L0.460-----------------------------------------|"
- - "L0.461[24,100] 481ns 768kb|-----------------------------------------L0.461-----------------------------------------|"
- - "L0.462[24,100] 482ns 768kb|-----------------------------------------L0.462-----------------------------------------|"
- - "L0.463[24,100] 483ns 768kb|-----------------------------------------L0.463-----------------------------------------|"
- - "L0.464[24,100] 484ns 768kb|-----------------------------------------L0.464-----------------------------------------|"
- - "L0.465[24,100] 485ns 768kb|-----------------------------------------L0.465-----------------------------------------|"
- - "L0.466[24,100] 486ns 768kb|-----------------------------------------L0.466-----------------------------------------|"
- - "L0.467[24,100] 487ns 768kb|-----------------------------------------L0.467-----------------------------------------|"
- - "L0.468[24,100] 488ns 768kb|-----------------------------------------L0.468-----------------------------------------|"
- - "L0.469[24,100] 489ns 768kb|-----------------------------------------L0.469-----------------------------------------|"
- - "L0.470[24,100] 490ns 768kb|-----------------------------------------L0.470-----------------------------------------|"
- - "L0.471[24,100] 491ns 768kb|-----------------------------------------L0.471-----------------------------------------|"
- - "L0.472[24,100] 492ns 768kb|-----------------------------------------L0.472-----------------------------------------|"
- - "L0.473[24,100] 493ns 768kb|-----------------------------------------L0.473-----------------------------------------|"
- - "L0.474[24,100] 494ns 768kb|-----------------------------------------L0.474-----------------------------------------|"
- - "L0.475[24,100] 495ns 768kb|-----------------------------------------L0.475-----------------------------------------|"
- - "L0.476[24,100] 496ns 768kb|-----------------------------------------L0.476-----------------------------------------|"
- - "L0.477[24,100] 497ns 768kb|-----------------------------------------L0.477-----------------------------------------|"
- - "L0.478[24,100] 498ns 768kb|-----------------------------------------L0.478-----------------------------------------|"
- - "L0.479[24,100] 499ns 768kb|-----------------------------------------L0.479-----------------------------------------|"
- - "L0.480[24,100] 500ns 768kb|-----------------------------------------L0.480-----------------------------------------|"
- - "L0.481[24,100] 501ns 768kb|-----------------------------------------L0.481-----------------------------------------|"
- - "L0.482[24,100] 502ns 768kb|-----------------------------------------L0.482-----------------------------------------|"
- - "L0.483[24,100] 503ns 768kb|-----------------------------------------L0.483-----------------------------------------|"
- - "L0.484[24,100] 504ns 768kb|-----------------------------------------L0.484-----------------------------------------|"
- - "L0.485[24,100] 505ns 768kb|-----------------------------------------L0.485-----------------------------------------|"
- - "L0.486[24,100] 506ns 768kb|-----------------------------------------L0.486-----------------------------------------|"
- - "L0.487[24,100] 507ns 768kb|-----------------------------------------L0.487-----------------------------------------|"
- - "L0.488[24,100] 508ns 768kb|-----------------------------------------L0.488-----------------------------------------|"
- - "L0.489[24,100] 509ns 768kb|-----------------------------------------L0.489-----------------------------------------|"
- - "L0.490[24,100] 510ns 768kb|-----------------------------------------L0.490-----------------------------------------|"
- - "L0.491[24,100] 511ns 768kb|-----------------------------------------L0.491-----------------------------------------|"
- - "L0.492[24,100] 512ns 768kb|-----------------------------------------L0.492-----------------------------------------|"
- - "L0.493[24,100] 513ns 768kb|-----------------------------------------L0.493-----------------------------------------|"
- - "L0.494[24,100] 514ns 768kb|-----------------------------------------L0.494-----------------------------------------|"
- - "L0.495[24,100] 515ns 768kb|-----------------------------------------L0.495-----------------------------------------|"
- - "L0.496[24,100] 516ns 768kb|-----------------------------------------L0.496-----------------------------------------|"
- - "L0.497[24,100] 517ns 768kb|-----------------------------------------L0.497-----------------------------------------|"
- - "L0.498[24,100] 518ns 768kb|-----------------------------------------L0.498-----------------------------------------|"
- - "L0.499[24,100] 519ns 768kb|-----------------------------------------L0.499-----------------------------------------|"
- - "L0.500[24,100] 520ns 768kb|-----------------------------------------L0.500-----------------------------------------|"
- - "L0.501[24,100] 521ns 768kb|-----------------------------------------L0.501-----------------------------------------|"
- - "L0.502[24,100] 522ns 768kb|-----------------------------------------L0.502-----------------------------------------|"
- - "L0.503[24,100] 523ns 768kb|-----------------------------------------L0.503-----------------------------------------|"
- - "L0.504[24,100] 524ns 768kb|-----------------------------------------L0.504-----------------------------------------|"
- - "L0.505[24,100] 525ns 768kb|-----------------------------------------L0.505-----------------------------------------|"
- - "L0.506[24,100] 526ns 768kb|-----------------------------------------L0.506-----------------------------------------|"
- - "L0.507[24,100] 527ns 768kb|-----------------------------------------L0.507-----------------------------------------|"
- - "L0.508[24,100] 528ns 768kb|-----------------------------------------L0.508-----------------------------------------|"
- - "L0.509[24,100] 529ns 768kb|-----------------------------------------L0.509-----------------------------------------|"
- - "L0.510[24,100] 530ns 768kb|-----------------------------------------L0.510-----------------------------------------|"
- - "L0.511[24,100] 531ns 768kb|-----------------------------------------L0.511-----------------------------------------|"
- - "L0.512[24,100] 532ns 768kb|-----------------------------------------L0.512-----------------------------------------|"
- - "L0.513[24,100] 533ns 768kb|-----------------------------------------L0.513-----------------------------------------|"
- - "L0.514[24,100] 534ns 768kb|-----------------------------------------L0.514-----------------------------------------|"
- - "L0.515[24,100] 535ns 768kb|-----------------------------------------L0.515-----------------------------------------|"
- - "L0.516[24,100] 536ns 768kb|-----------------------------------------L0.516-----------------------------------------|"
- - "L0.517[24,100] 537ns 768kb|-----------------------------------------L0.517-----------------------------------------|"
- - "L0.518[24,100] 538ns 768kb|-----------------------------------------L0.518-----------------------------------------|"
- - "L0.519[24,100] 539ns 768kb|-----------------------------------------L0.519-----------------------------------------|"
- - "L0.520[24,100] 540ns 768kb|-----------------------------------------L0.520-----------------------------------------|"
- - "L0.521[24,100] 541ns 768kb|-----------------------------------------L0.521-----------------------------------------|"
- - "L0.522[24,100] 542ns 768kb|-----------------------------------------L0.522-----------------------------------------|"
- - "L0.523[24,100] 543ns 768kb|-----------------------------------------L0.523-----------------------------------------|"
- - "L0.524[24,100] 544ns 768kb|-----------------------------------------L0.524-----------------------------------------|"
- - "L0.525[24,100] 545ns 768kb|-----------------------------------------L0.525-----------------------------------------|"
- - "L0.526[24,100] 546ns 768kb|-----------------------------------------L0.526-----------------------------------------|"
- - "L0.527[24,100] 547ns 768kb|-----------------------------------------L0.527-----------------------------------------|"
- - "L0.528[24,100] 548ns 768kb|-----------------------------------------L0.528-----------------------------------------|"
- - "L0.529[24,100] 549ns 768kb|-----------------------------------------L0.529-----------------------------------------|"
- - "L0.530[24,100] 550ns 768kb|-----------------------------------------L0.530-----------------------------------------|"
- - "L0.531[24,100] 551ns 768kb|-----------------------------------------L0.531-----------------------------------------|"
- - "L0.532[24,100] 552ns 768kb|-----------------------------------------L0.532-----------------------------------------|"
- - "L0.533[24,100] 553ns 768kb|-----------------------------------------L0.533-----------------------------------------|"
- - "L0.534[24,100] 554ns 768kb|-----------------------------------------L0.534-----------------------------------------|"
- - "L0.535[24,100] 555ns 768kb|-----------------------------------------L0.535-----------------------------------------|"
- - "L0.536[24,100] 556ns 768kb|-----------------------------------------L0.536-----------------------------------------|"
- - "L0.537[24,100] 557ns 768kb|-----------------------------------------L0.537-----------------------------------------|"
- - "L0.538[24,100] 558ns 768kb|-----------------------------------------L0.538-----------------------------------------|"
- - "L0.539[24,100] 559ns 768kb|-----------------------------------------L0.539-----------------------------------------|"
- - "L0.540[24,100] 560ns 768kb|-----------------------------------------L0.540-----------------------------------------|"
- - "L0.541[24,100] 561ns 768kb|-----------------------------------------L0.541-----------------------------------------|"
- - "L0.542[24,100] 562ns 768kb|-----------------------------------------L0.542-----------------------------------------|"
- - "L0.543[24,100] 563ns 768kb|-----------------------------------------L0.543-----------------------------------------|"
- - "L0.544[24,100] 564ns 768kb|-----------------------------------------L0.544-----------------------------------------|"
- - "L0.545[24,100] 565ns 768kb|-----------------------------------------L0.545-----------------------------------------|"
- - "L0.546[24,100] 566ns 768kb|-----------------------------------------L0.546-----------------------------------------|"
- - "L0.547[24,100] 567ns 768kb|-----------------------------------------L0.547-----------------------------------------|"
- - "L0.548[24,100] 568ns 768kb|-----------------------------------------L0.548-----------------------------------------|"
- - "L0.549[24,100] 569ns 768kb|-----------------------------------------L0.549-----------------------------------------|"
- - "L0.550[24,100] 570ns 768kb|-----------------------------------------L0.550-----------------------------------------|"
- - "L0.551[24,100] 571ns 768kb|-----------------------------------------L0.551-----------------------------------------|"
- - "L0.552[24,100] 572ns 768kb|-----------------------------------------L0.552-----------------------------------------|"
- - "L0.553[24,100] 573ns 768kb|-----------------------------------------L0.553-----------------------------------------|"
- - "L0.554[24,100] 574ns 768kb|-----------------------------------------L0.554-----------------------------------------|"
- - "L0.555[24,100] 575ns 768kb|-----------------------------------------L0.555-----------------------------------------|"
- - "L0.556[24,100] 576ns 768kb|-----------------------------------------L0.556-----------------------------------------|"
- - "L0.557[24,100] 577ns 768kb|-----------------------------------------L0.557-----------------------------------------|"
- - "L0.558[24,100] 578ns 768kb|-----------------------------------------L0.558-----------------------------------------|"
- - "L0.559[24,100] 579ns 768kb|-----------------------------------------L0.559-----------------------------------------|"
- - "L0.560[24,100] 580ns 768kb|-----------------------------------------L0.560-----------------------------------------|"
- - "L0.561[24,100] 581ns 768kb|-----------------------------------------L0.561-----------------------------------------|"
- - "L0.562[24,100] 582ns 768kb|-----------------------------------------L0.562-----------------------------------------|"
- - "L0.563[24,100] 583ns 768kb|-----------------------------------------L0.563-----------------------------------------|"
- - "L0.564[24,100] 584ns 768kb|-----------------------------------------L0.564-----------------------------------------|"
- - "L0.565[24,100] 585ns 768kb|-----------------------------------------L0.565-----------------------------------------|"
- - "L0.566[24,100] 586ns 768kb|-----------------------------------------L0.566-----------------------------------------|"
- - "L0.567[24,100] 587ns 768kb|-----------------------------------------L0.567-----------------------------------------|"
- - "L0.568[24,100] 588ns 768kb|-----------------------------------------L0.568-----------------------------------------|"
- - "L0.569[24,100] 589ns 768kb|-----------------------------------------L0.569-----------------------------------------|"
- - "L0.570[24,100] 590ns 768kb|-----------------------------------------L0.570-----------------------------------------|"
- - "L0.571[24,100] 591ns 768kb|-----------------------------------------L0.571-----------------------------------------|"
- - "L0.572[24,100] 592ns 768kb|-----------------------------------------L0.572-----------------------------------------|"
- - "L0.573[24,100] 593ns 768kb|-----------------------------------------L0.573-----------------------------------------|"
- - "L0.574[24,100] 594ns 768kb|-----------------------------------------L0.574-----------------------------------------|"
- - "L0.575[24,100] 595ns 768kb|-----------------------------------------L0.575-----------------------------------------|"
- - "L0.576[24,100] 596ns 768kb|-----------------------------------------L0.576-----------------------------------------|"
- - "L0.577[24,100] 597ns 768kb|-----------------------------------------L0.577-----------------------------------------|"
- - "L0.578[24,100] 598ns 768kb|-----------------------------------------L0.578-----------------------------------------|"
- - "L0.579[24,100] 599ns 768kb|-----------------------------------------L0.579-----------------------------------------|"
- - "L0.580[24,100] 600ns 768kb|-----------------------------------------L0.580-----------------------------------------|"
- - "L0.581[24,100] 601ns 768kb|-----------------------------------------L0.581-----------------------------------------|"
- - "L0.582[24,100] 602ns 768kb|-----------------------------------------L0.582-----------------------------------------|"
- - "L0.583[24,100] 603ns 768kb|-----------------------------------------L0.583-----------------------------------------|"
- - "L0.584[24,100] 604ns 768kb|-----------------------------------------L0.584-----------------------------------------|"
- - "L0.585[24,100] 605ns 768kb|-----------------------------------------L0.585-----------------------------------------|"
- - "L0.586[24,100] 606ns 768kb|-----------------------------------------L0.586-----------------------------------------|"
- - "L0.587[24,100] 607ns 768kb|-----------------------------------------L0.587-----------------------------------------|"
- - "L0.588[24,100] 608ns 768kb|-----------------------------------------L0.588-----------------------------------------|"
- - "L0.589[24,100] 609ns 768kb|-----------------------------------------L0.589-----------------------------------------|"
- - "L0.590[24,100] 610ns 768kb|-----------------------------------------L0.590-----------------------------------------|"
- - "L0.591[24,100] 611ns 768kb|-----------------------------------------L0.591-----------------------------------------|"
- - "L0.592[24,100] 612ns 768kb|-----------------------------------------L0.592-----------------------------------------|"
- - "L0.593[24,100] 613ns 768kb|-----------------------------------------L0.593-----------------------------------------|"
- - "L0.594[24,100] 614ns 768kb|-----------------------------------------L0.594-----------------------------------------|"
- - "L0.595[24,100] 615ns 768kb|-----------------------------------------L0.595-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 446mb total:"
- - "L0 "
- - "L0.?[24,42] 615ns 106mb |-------L0.?--------| "
- - "L0.?[43,60] 615ns 100mb |-------L0.?-------| "
- - "L0.?[61,78] 615ns 100mb |-------L0.?-------| "
- - "L0.?[79,96] 615ns 100mb |-------L0.?-------| "
- - "L0.?[97,100] 615ns 41mb |L0.?|"
- - "Committing partition 1:"
- - " Soft Deleting 200 files: L0.399, L0.400, L0.401, L0.402, L0.403, L0.404, L0.405, L0.406, L0.407, L0.408, L0.409, L0.410, L0.411, L0.412, L0.413, L0.414, L0.415, L0.416, L0.417, L0.418, L0.419, L0.420, L0.421, L0.422, L0.423, L0.424, L0.425, L0.426, L0.427, L0.428, L0.429, L0.430, L0.431, L0.432, L0.433, L0.434, L0.435, L0.436, L0.437, L0.438, L0.439, L0.440, L0.441, L0.442, L0.443, L0.444, L0.445, L0.446, L0.447, L0.448, L0.449, L0.450, L0.451, L0.452, L0.453, L0.454, L0.455, L0.456, L0.457, L0.458, L0.459, L0.460, L0.461, L0.462, L0.463, L0.464, L0.465, L0.466, L0.467, L0.468, L0.469, L0.470, L0.471, L0.472, L0.473, L0.474, L0.475, L0.476, L0.477, L0.478, L0.479, L0.480, L0.481, L0.482, L0.483, L0.484, L0.485, L0.486, L0.487, L0.488, L0.489, L0.490, L0.491, L0.492, L0.493, L0.494, L0.495, L0.496, L0.497, L0.498, L0.499, L0.500, L0.501, L0.502, L0.503, L0.504, L0.505, L0.506, L0.507, L0.508, L0.509, L0.510, L0.511, L0.512, L0.513, L0.514, L0.515, L0.516, L0.517, L0.518, L0.519, L0.520, L0.521, L0.522, L0.523, L0.524, L0.525, L0.526, L0.527, L0.528, L0.529, L0.530, L0.531, L0.532, L0.533, L0.534, L0.535, L0.536, L0.537, L0.538, L0.539, L0.540, L0.541, L0.542, L0.543, L0.544, L0.545, L0.546, L0.547, L0.548, L0.549, L0.550, L0.551, L0.552, L0.553, L0.554, L0.555, L0.556, L0.557, L0.558, L0.559, L0.560, L0.561, L0.562, L0.563, L0.564, L0.565, L0.566, L0.567, L0.568, L0.569, L0.570, L0.571, L0.572, L0.573, L0.574, L0.575, L0.576, L0.577, L0.578, L0.579, L0.580, L0.581, L0.582, L0.583, L0.584, L0.585, L0.586, L0.587, L0.588, L0.589, L0.590, L0.591, L0.592, L0.593, L0.594, L0.595, L0.1003, L0.1004, L0.1005"
- - " Creating 5 files"
- - "**** Simulation run 3, type=split(HighL0OverlapTotalBacklog)(split_times=[39]). 1 Input Files, 106mb total:"
- - "L0, all files 106mb "
- - "L0.1006[24,42] 615ns |----------------------------------------L0.1006-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 106mb total:"
- - "L0 "
- - "L0.?[24,39] 615ns 88mb |----------------------------------L0.?-----------------------------------| "
- - "L0.?[40,42] 615ns 18mb |--L0.?--|"
- - "**** Simulation run 4, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.596[24,100] 616ns |-----------------------------------------L0.596-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 616ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 616ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 616ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 616ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 616ns 192kb |-----L0.?------| "
- - "**** Simulation run 5, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.597[24,100] 617ns |-----------------------------------------L0.597-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 617ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 617ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 617ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 617ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 617ns 192kb |-----L0.?------| "
- - "**** Simulation run 6, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.598[24,100] 618ns |-----------------------------------------L0.598-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 618ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 618ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 618ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 618ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 618ns 192kb |-----L0.?------| "
- - "**** Simulation run 7, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.599[24,100] 619ns |-----------------------------------------L0.599-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 619ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 619ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 619ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 619ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 619ns 192kb |-----L0.?------| "
- - "**** Simulation run 8, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.600[24,100] 620ns |-----------------------------------------L0.600-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 620ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 620ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 620ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 620ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 620ns 192kb |-----L0.?------| "
- - "**** Simulation run 9, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.601[24,100] 621ns |-----------------------------------------L0.601-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 621ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 621ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 621ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 621ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 621ns 192kb |-----L0.?------| "
- - "**** Simulation run 10, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.602[24,100] 622ns |-----------------------------------------L0.602-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 622ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 622ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 622ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 622ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 622ns 192kb |-----L0.?------| "
- - "**** Simulation run 11, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.603[24,100] 623ns |-----------------------------------------L0.603-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 623ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 623ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 623ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 623ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 623ns 192kb |-----L0.?------| "
- - "**** Simulation run 12, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.604[24,100] 624ns |-----------------------------------------L0.604-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 624ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 624ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 624ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 624ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 624ns 192kb |-----L0.?------| "
- - "**** Simulation run 13, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.605[24,100] 625ns |-----------------------------------------L0.605-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 625ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 625ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 625ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 625ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 625ns 192kb |-----L0.?------| "
- - "**** Simulation run 14, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.606[24,100] 626ns |-----------------------------------------L0.606-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 626ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 626ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 626ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 626ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 626ns 192kb |-----L0.?------| "
- - "**** Simulation run 15, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.607[24,100] 627ns |-----------------------------------------L0.607-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 627ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 627ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 627ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 627ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 627ns 192kb |-----L0.?------| "
- - "**** Simulation run 16, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.608[24,100] 628ns |-----------------------------------------L0.608-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 628ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 628ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 628ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 628ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 628ns 192kb |-----L0.?------| "
- - "**** Simulation run 17, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.609[24,100] 629ns |-----------------------------------------L0.609-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 629ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 629ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 629ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 629ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 629ns 192kb |-----L0.?------| "
- - "**** Simulation run 18, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.610[24,100] 630ns |-----------------------------------------L0.610-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 630ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 630ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 630ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 630ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 630ns 192kb |-----L0.?------| "
- - "**** Simulation run 19, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.611[24,100] 631ns |-----------------------------------------L0.611-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 631ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 631ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 631ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 631ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 631ns 192kb |-----L0.?------| "
- - "**** Simulation run 20, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.612[24,100] 632ns |-----------------------------------------L0.612-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 632ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 632ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 632ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 632ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 632ns 192kb |-----L0.?------| "
- - "**** Simulation run 21, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.613[24,100] 633ns |-----------------------------------------L0.613-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 633ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 633ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 633ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 633ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 633ns 192kb |-----L0.?------| "
- - "**** Simulation run 22, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.614[24,100] 634ns |-----------------------------------------L0.614-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 634ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 634ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 634ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 634ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 634ns 192kb |-----L0.?------| "
- - "**** Simulation run 23, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.615[24,100] 635ns |-----------------------------------------L0.615-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 635ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 635ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 635ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 635ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 635ns 192kb |-----L0.?------| "
- - "**** Simulation run 24, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.616[24,100] 636ns |-----------------------------------------L0.616-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 636ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 636ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 636ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 636ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 636ns 192kb |-----L0.?------| "
- - "**** Simulation run 25, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.617[24,100] 637ns |-----------------------------------------L0.617-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 637ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 637ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 637ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 637ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 637ns 192kb |-----L0.?------| "
- - "**** Simulation run 26, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.618[24,100] 638ns |-----------------------------------------L0.618-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 638ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 638ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 638ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 638ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 638ns 192kb |-----L0.?------| "
- - "**** Simulation run 27, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.619[24,100] 639ns |-----------------------------------------L0.619-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 639ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 639ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 639ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 639ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 639ns 192kb |-----L0.?------| "
- - "**** Simulation run 28, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.620[24,100] 640ns |-----------------------------------------L0.620-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 640ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 640ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 640ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 640ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 640ns 192kb |-----L0.?------| "
- - "**** Simulation run 29, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.621[24,100] 641ns |-----------------------------------------L0.621-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 641ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 641ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 641ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 641ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 641ns 192kb |-----L0.?------| "
- - "**** Simulation run 30, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.622[24,100] 642ns |-----------------------------------------L0.622-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 642ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 642ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 642ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 642ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 642ns 192kb |-----L0.?------| "
- - "**** Simulation run 31, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.623[24,100] 643ns |-----------------------------------------L0.623-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 643ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 643ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 643ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 643ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 643ns 192kb |-----L0.?------| "
- - "**** Simulation run 32, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.624[24,100] 644ns |-----------------------------------------L0.624-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 644ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 644ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 644ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 644ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 644ns 192kb |-----L0.?------| "
- - "**** Simulation run 33, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.625[24,100] 645ns |-----------------------------------------L0.625-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 645ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 645ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 645ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 645ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 645ns 192kb |-----L0.?------| "
- - "**** Simulation run 34, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.626[24,100] 646ns |-----------------------------------------L0.626-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 646ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 646ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 646ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 646ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 646ns 192kb |-----L0.?------| "
- - "**** Simulation run 35, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.627[24,100] 647ns |-----------------------------------------L0.627-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 647ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 647ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 647ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 647ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 647ns 192kb |-----L0.?------| "
- - "**** Simulation run 36, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.630[24,100] 650ns |-----------------------------------------L0.630-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 650ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 650ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 650ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 650ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 650ns 192kb |-----L0.?------| "
- - "**** Simulation run 37, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.631[24,100] 651ns |-----------------------------------------L0.631-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 651ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 651ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 651ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 651ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 651ns 192kb |-----L0.?------| "
- - "**** Simulation run 38, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.632[24,100] 652ns |-----------------------------------------L0.632-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 652ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 652ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 652ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 652ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 652ns 192kb |-----L0.?------| "
- - "**** Simulation run 39, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.633[24,100] 653ns |-----------------------------------------L0.633-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 653ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 653ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 653ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 653ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 653ns 192kb |-----L0.?------| "
- - "**** Simulation run 40, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.634[24,100] 654ns |-----------------------------------------L0.634-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 654ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 654ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 654ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 654ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 654ns 192kb |-----L0.?------| "
- - "**** Simulation run 41, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.635[24,100] 655ns |-----------------------------------------L0.635-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 655ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 655ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 655ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 655ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 655ns 192kb |-----L0.?------| "
- - "**** Simulation run 42, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.636[24,100] 656ns |-----------------------------------------L0.636-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 656ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 656ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 656ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 656ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 656ns 192kb |-----L0.?------| "
- - "**** Simulation run 43, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.637[24,100] 657ns |-----------------------------------------L0.637-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 657ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 657ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 657ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 657ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 657ns 192kb |-----L0.?------| "
- - "**** Simulation run 44, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.628[24,100] 648ns |-----------------------------------------L0.628-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 648ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 648ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 648ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 648ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 648ns 192kb |-----L0.?------| "
- - "**** Simulation run 45, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.629[24,100] 649ns |-----------------------------------------L0.629-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 649ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 649ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 649ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 649ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 649ns 192kb |-----L0.?------| "
- - "**** Simulation run 46, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.638[24,100] 658ns |-----------------------------------------L0.638-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 658ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 658ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 658ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 658ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 658ns 192kb |-----L0.?------| "
- - "**** Simulation run 47, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.639[24,100] 659ns |-----------------------------------------L0.639-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 659ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 659ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 659ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 659ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 659ns 192kb |-----L0.?------| "
- - "**** Simulation run 48, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.640[24,100] 660ns |-----------------------------------------L0.640-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 660ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 660ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 660ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 660ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 660ns 192kb |-----L0.?------| "
- - "**** Simulation run 49, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.641[24,100] 661ns |-----------------------------------------L0.641-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 661ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 661ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 661ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 661ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 661ns 192kb |-----L0.?------| "
- - "**** Simulation run 50, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.642[24,100] 662ns |-----------------------------------------L0.642-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 662ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 662ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 662ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 662ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 662ns 192kb |-----L0.?------| "
- - "**** Simulation run 51, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.643[24,100] 663ns |-----------------------------------------L0.643-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 663ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 663ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 663ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 663ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 663ns 192kb |-----L0.?------| "
- - "**** Simulation run 52, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.644[24,100] 664ns |-----------------------------------------L0.644-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 664ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 664ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 664ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 664ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 664ns 192kb |-----L0.?------| "
- - "**** Simulation run 53, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.645[24,100] 665ns |-----------------------------------------L0.645-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 665ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 665ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 665ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 665ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 665ns 192kb |-----L0.?------| "
- - "**** Simulation run 54, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.646[24,100] 666ns |-----------------------------------------L0.646-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 666ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 666ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 666ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 666ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 666ns 192kb |-----L0.?------| "
- - "**** Simulation run 55, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.647[24,100] 667ns |-----------------------------------------L0.647-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 667ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 667ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 667ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 667ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 667ns 192kb |-----L0.?------| "
- - "**** Simulation run 56, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.648[24,100] 668ns |-----------------------------------------L0.648-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 668ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 668ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 668ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 668ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 668ns 192kb |-----L0.?------| "
- - "**** Simulation run 57, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.649[24,100] 669ns |-----------------------------------------L0.649-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 669ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 669ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 669ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 669ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 669ns 192kb |-----L0.?------| "
- - "**** Simulation run 58, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.650[24,100] 670ns |-----------------------------------------L0.650-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 670ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 670ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 670ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 670ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 670ns 192kb |-----L0.?------| "
- - "**** Simulation run 59, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.651[24,100] 671ns |-----------------------------------------L0.651-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 671ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 671ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 671ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 671ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 671ns 192kb |-----L0.?------| "
- - "**** Simulation run 60, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.652[24,100] 672ns |-----------------------------------------L0.652-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 672ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 672ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 672ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 672ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 672ns 192kb |-----L0.?------| "
- - "**** Simulation run 61, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.653[24,100] 673ns |-----------------------------------------L0.653-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 673ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 673ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 673ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 673ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 673ns 192kb |-----L0.?------| "
- - "**** Simulation run 62, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.654[24,100] 674ns |-----------------------------------------L0.654-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 674ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 674ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 674ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 674ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 674ns 192kb |-----L0.?------| "
- - "**** Simulation run 63, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.655[24,100] 675ns |-----------------------------------------L0.655-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 675ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 675ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 675ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 675ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 675ns 192kb |-----L0.?------| "
- - "**** Simulation run 64, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.656[24,100] 676ns |-----------------------------------------L0.656-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 676ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 676ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 676ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 676ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 676ns 192kb |-----L0.?------| "
- - "**** Simulation run 65, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.657[24,100] 677ns |-----------------------------------------L0.657-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 677ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 677ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 677ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 677ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 677ns 192kb |-----L0.?------| "
- - "**** Simulation run 66, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.658[24,100] 678ns |-----------------------------------------L0.658-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 678ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 678ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 678ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 678ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 678ns 192kb |-----L0.?------| "
- - "**** Simulation run 67, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.659[24,100] 679ns |-----------------------------------------L0.659-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 679ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 679ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 679ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 679ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 679ns 192kb |-----L0.?------| "
- - "**** Simulation run 68, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.660[24,100] 680ns |-----------------------------------------L0.660-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 680ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 680ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 680ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 680ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 680ns 192kb |-----L0.?------| "
- - "**** Simulation run 69, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.661[24,100] 681ns |-----------------------------------------L0.661-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 681ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 681ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 681ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 681ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 681ns 192kb |-----L0.?------| "
- - "**** Simulation run 70, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.662[24,100] 682ns |-----------------------------------------L0.662-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 682ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 682ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 682ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 682ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 682ns 192kb |-----L0.?------| "
- - "**** Simulation run 71, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.663[24,100] 683ns |-----------------------------------------L0.663-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 683ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 683ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 683ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 683ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 683ns 192kb |-----L0.?------| "
- - "**** Simulation run 72, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.664[24,100] 684ns |-----------------------------------------L0.664-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 684ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 684ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 684ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 684ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 684ns 192kb |-----L0.?------| "
- - "**** Simulation run 73, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.665[24,100] 685ns |-----------------------------------------L0.665-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 685ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 685ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 685ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 685ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 685ns 192kb |-----L0.?------| "
- - "**** Simulation run 74, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.666[24,100] 686ns |-----------------------------------------L0.666-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 686ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 686ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 686ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 686ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 686ns 192kb |-----L0.?------| "
- - "**** Simulation run 75, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.667[24,100] 687ns |-----------------------------------------L0.667-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 687ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 687ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 687ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 687ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 687ns 192kb |-----L0.?------| "
- - "**** Simulation run 76, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.668[24,100] 688ns |-----------------------------------------L0.668-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 688ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 688ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 688ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 688ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 688ns 192kb |-----L0.?------| "
- - "**** Simulation run 77, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.669[24,100] 689ns |-----------------------------------------L0.669-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 689ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 689ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 689ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 689ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 689ns 192kb |-----L0.?------| "
- - "**** Simulation run 78, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.670[24,100] 690ns |-----------------------------------------L0.670-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 690ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 690ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 690ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 690ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 690ns 192kb |-----L0.?------| "
- - "**** Simulation run 79, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.671[24,100] 691ns |-----------------------------------------L0.671-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 691ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 691ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 691ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 691ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 691ns 192kb |-----L0.?------| "
- - "**** Simulation run 80, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.672[24,100] 692ns |-----------------------------------------L0.672-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 692ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 692ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 692ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 692ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 692ns 192kb |-----L0.?------| "
- - "**** Simulation run 81, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.673[24,100] 693ns |-----------------------------------------L0.673-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 693ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 693ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 693ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 693ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 693ns 192kb |-----L0.?------| "
- - "**** Simulation run 82, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.674[24,100] 694ns |-----------------------------------------L0.674-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 694ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 694ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 694ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 694ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 694ns 192kb |-----L0.?------| "
- - "**** Simulation run 83, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.675[24,100] 695ns |-----------------------------------------L0.675-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 695ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 695ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 695ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 695ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 695ns 192kb |-----L0.?------| "
- - "**** Simulation run 84, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.676[24,100] 696ns |-----------------------------------------L0.676-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 696ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 696ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 696ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 696ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 696ns 192kb |-----L0.?------| "
- - "**** Simulation run 85, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.677[24,100] 697ns |-----------------------------------------L0.677-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 697ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 697ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 697ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 697ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 697ns 192kb |-----L0.?------| "
- - "**** Simulation run 86, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.678[24,100] 698ns |-----------------------------------------L0.678-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 698ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 698ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 698ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 698ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 698ns 192kb |-----L0.?------| "
- - "**** Simulation run 87, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.679[24,100] 699ns |-----------------------------------------L0.679-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 699ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 699ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 699ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 699ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 699ns 192kb |-----L0.?------| "
- - "**** Simulation run 88, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.680[24,100] 700ns |-----------------------------------------L0.680-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 700ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 700ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 700ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 700ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 700ns 192kb |-----L0.?------| "
- - "**** Simulation run 89, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.681[24,100] 701ns |-----------------------------------------L0.681-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 701ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 701ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 701ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 701ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 701ns 192kb |-----L0.?------| "
- - "**** Simulation run 90, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.682[24,100] 702ns |-----------------------------------------L0.682-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 702ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 702ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 702ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 702ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 702ns 192kb |-----L0.?------| "
- - "**** Simulation run 91, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.683[24,100] 703ns |-----------------------------------------L0.683-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 703ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 703ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 703ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 703ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 703ns 192kb |-----L0.?------| "
- - "**** Simulation run 92, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.684[24,100] 704ns |-----------------------------------------L0.684-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 704ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 704ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 704ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 704ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 704ns 192kb |-----L0.?------| "
- - "**** Simulation run 93, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.685[24,100] 705ns |-----------------------------------------L0.685-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 705ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 705ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 705ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 705ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 705ns 192kb |-----L0.?------| "
- - "**** Simulation run 94, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.686[24,100] 706ns |-----------------------------------------L0.686-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 706ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 706ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 706ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 706ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 706ns 192kb |-----L0.?------| "
- - "**** Simulation run 95, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.687[24,100] 707ns |-----------------------------------------L0.687-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 707ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 707ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 707ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 707ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 707ns 192kb |-----L0.?------| "
- - "**** Simulation run 96, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.688[24,100] 708ns |-----------------------------------------L0.688-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 708ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 708ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 708ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 708ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 708ns 192kb |-----L0.?------| "
- - "**** Simulation run 97, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.689[24,100] 709ns |-----------------------------------------L0.689-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 709ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 709ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 709ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 709ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 709ns 192kb |-----L0.?------| "
- - "**** Simulation run 98, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.690[24,100] 710ns |-----------------------------------------L0.690-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 710ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 710ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 710ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 710ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 710ns 192kb |-----L0.?------| "
- - "**** Simulation run 99, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.691[24,100] 711ns |-----------------------------------------L0.691-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 711ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 711ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 711ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 711ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 711ns 192kb |-----L0.?------| "
- - "**** Simulation run 100, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.692[24,100] 712ns |-----------------------------------------L0.692-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 712ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 712ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 712ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 712ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 712ns 192kb |-----L0.?------| "
- - "**** Simulation run 101, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.693[24,100] 713ns |-----------------------------------------L0.693-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 713ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 713ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 713ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 713ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 713ns 192kb |-----L0.?------| "
- - "**** Simulation run 102, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.694[24,100] 714ns |-----------------------------------------L0.694-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 714ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 714ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 714ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 714ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 714ns 192kb |-----L0.?------| "
- - "**** Simulation run 103, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.695[24,100] 715ns |-----------------------------------------L0.695-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 715ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 715ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 715ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 715ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 715ns 192kb |-----L0.?------| "
- - "**** Simulation run 104, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.696[24,100] 716ns |-----------------------------------------L0.696-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 716ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 716ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 716ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 716ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 716ns 192kb |-----L0.?------| "
- - "**** Simulation run 105, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.697[24,100] 717ns |-----------------------------------------L0.697-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 717ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 717ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 717ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 717ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 717ns 192kb |-----L0.?------| "
- - "**** Simulation run 106, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.698[24,100] 718ns |-----------------------------------------L0.698-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 718ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 718ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 718ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 718ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 718ns 192kb |-----L0.?------| "
- - "**** Simulation run 107, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.699[24,100] 719ns |-----------------------------------------L0.699-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 719ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 719ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 719ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 719ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 719ns 192kb |-----L0.?------| "
- - "**** Simulation run 108, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.700[24,100] 720ns |-----------------------------------------L0.700-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 720ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 720ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 720ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 720ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 720ns 192kb |-----L0.?------| "
- - "**** Simulation run 109, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.701[24,100] 721ns |-----------------------------------------L0.701-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 721ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 721ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 721ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 721ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 721ns 192kb |-----L0.?------| "
- - "**** Simulation run 110, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.702[24,100] 722ns |-----------------------------------------L0.702-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 722ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 722ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 722ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 722ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 722ns 192kb |-----L0.?------| "
- - "**** Simulation run 111, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.703[24,100] 723ns |-----------------------------------------L0.703-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 723ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 723ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 723ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 723ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 723ns 192kb |-----L0.?------| "
- - "**** Simulation run 112, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.704[24,100] 724ns |-----------------------------------------L0.704-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 724ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 724ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 724ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 724ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 724ns 192kb |-----L0.?------| "
- - "**** Simulation run 113, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.705[24,100] 725ns |-----------------------------------------L0.705-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 725ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 725ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 725ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 725ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 725ns 192kb |-----L0.?------| "
- - "**** Simulation run 114, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.706[24,100] 726ns |-----------------------------------------L0.706-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 726ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 726ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 726ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 726ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 726ns 192kb |-----L0.?------| "
- - "**** Simulation run 115, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.707[24,100] 727ns |-----------------------------------------L0.707-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 727ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 727ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 727ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 727ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 727ns 192kb |-----L0.?------| "
- - "**** Simulation run 116, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.708[24,100] 728ns |-----------------------------------------L0.708-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 728ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 728ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 728ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 728ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 728ns 192kb |-----L0.?------| "
- - "**** Simulation run 117, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.709[24,100] 729ns |-----------------------------------------L0.709-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 729ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 729ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 729ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 729ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 729ns 192kb |-----L0.?------| "
- - "**** Simulation run 118, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.710[24,100] 730ns |-----------------------------------------L0.710-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 730ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 730ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 730ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 730ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 730ns 192kb |-----L0.?------| "
- - "**** Simulation run 119, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.711[24,100] 731ns |-----------------------------------------L0.711-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 731ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 731ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 731ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 731ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 731ns 192kb |-----L0.?------| "
- - "**** Simulation run 120, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.712[24,100] 732ns |-----------------------------------------L0.712-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 732ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 732ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 732ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 732ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 732ns 192kb |-----L0.?------| "
- - "**** Simulation run 121, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.713[24,100] 733ns |-----------------------------------------L0.713-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 733ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 733ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 733ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 733ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 733ns 192kb |-----L0.?------| "
- - "**** Simulation run 122, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.714[24,100] 734ns |-----------------------------------------L0.714-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 734ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 734ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 734ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 734ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 734ns 192kb |-----L0.?------| "
- - "**** Simulation run 123, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.715[24,100] 735ns |-----------------------------------------L0.715-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 735ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 735ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 735ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 735ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 735ns 192kb |-----L0.?------| "
- - "**** Simulation run 124, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.716[24,100] 736ns |-----------------------------------------L0.716-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 736ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 736ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 736ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 736ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 736ns 192kb |-----L0.?------| "
- - "**** Simulation run 125, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.717[24,100] 737ns |-----------------------------------------L0.717-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 737ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 737ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 737ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 737ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 737ns 192kb |-----L0.?------| "
- - "**** Simulation run 126, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.718[24,100] 738ns |-----------------------------------------L0.718-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 738ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 738ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 738ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 738ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 738ns 192kb |-----L0.?------| "
- - "**** Simulation run 127, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.719[24,100] 739ns |-----------------------------------------L0.719-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 739ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 739ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 739ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 739ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 739ns 192kb |-----L0.?------| "
- - "**** Simulation run 128, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.720[24,100] 740ns |-----------------------------------------L0.720-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 740ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 740ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 740ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 740ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 740ns 192kb |-----L0.?------| "
- - "**** Simulation run 129, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.721[24,100] 741ns |-----------------------------------------L0.721-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 741ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 741ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 741ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 741ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 741ns 192kb |-----L0.?------| "
- - "**** Simulation run 130, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.722[24,100] 742ns |-----------------------------------------L0.722-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 742ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 742ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 742ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 742ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 742ns 192kb |-----L0.?------| "
- - "**** Simulation run 131, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.723[24,100] 743ns |-----------------------------------------L0.723-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 743ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 743ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 743ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 743ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 743ns 192kb |-----L0.?------| "
- - "**** Simulation run 132, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.724[24,100] 744ns |-----------------------------------------L0.724-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 744ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 744ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 744ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 744ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 744ns 192kb |-----L0.?------| "
- - "**** Simulation run 133, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.725[24,100] 745ns |-----------------------------------------L0.725-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 745ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 745ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 745ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 745ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 745ns 192kb |-----L0.?------| "
- - "**** Simulation run 134, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.726[24,100] 746ns |-----------------------------------------L0.726-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 746ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 746ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 746ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 746ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 746ns 192kb |-----L0.?------| "
- - "**** Simulation run 135, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.727[24,100] 747ns |-----------------------------------------L0.727-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 747ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 747ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 747ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 747ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 747ns 192kb |-----L0.?------| "
- - "**** Simulation run 136, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.728[24,100] 748ns |-----------------------------------------L0.728-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 748ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 748ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 748ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 748ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 748ns 192kb |-----L0.?------| "
- - "**** Simulation run 137, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.729[24,100] 749ns |-----------------------------------------L0.729-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 749ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 749ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 749ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 749ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 749ns 192kb |-----L0.?------| "
- - "**** Simulation run 138, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.730[24,100] 750ns |-----------------------------------------L0.730-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 750ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 750ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 750ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 750ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 750ns 192kb |-----L0.?------| "
- - "**** Simulation run 139, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.731[24,100] 751ns |-----------------------------------------L0.731-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 751ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 751ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 751ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 751ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 751ns 192kb |-----L0.?------| "
- - "**** Simulation run 140, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.732[24,100] 752ns |-----------------------------------------L0.732-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 752ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 752ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 752ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 752ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 752ns 192kb |-----L0.?------| "
- - "**** Simulation run 141, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.733[24,100] 753ns |-----------------------------------------L0.733-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 753ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 753ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 753ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 753ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 753ns 192kb |-----L0.?------| "
- - "**** Simulation run 142, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.734[24,100] 754ns |-----------------------------------------L0.734-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 754ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 754ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 754ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 754ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 754ns 192kb |-----L0.?------| "
- - "**** Simulation run 143, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.735[24,100] 755ns |-----------------------------------------L0.735-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 755ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 755ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 755ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 755ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 755ns 192kb |-----L0.?------| "
- - "**** Simulation run 144, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.736[24,100] 756ns |-----------------------------------------L0.736-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 756ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 756ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 756ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 756ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 756ns 192kb |-----L0.?------| "
- - "**** Simulation run 145, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.737[24,100] 757ns |-----------------------------------------L0.737-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 757ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 757ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 757ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 757ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 757ns 192kb |-----L0.?------| "
- - "**** Simulation run 146, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.738[24,100] 758ns |-----------------------------------------L0.738-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 758ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 758ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 758ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 758ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 758ns 192kb |-----L0.?------| "
- - "**** Simulation run 147, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.739[24,100] 759ns |-----------------------------------------L0.739-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 759ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 759ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 759ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 759ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 759ns 192kb |-----L0.?------| "
- - "**** Simulation run 148, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.740[24,100] 760ns |-----------------------------------------L0.740-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 760ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 760ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 760ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 760ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 760ns 192kb |-----L0.?------| "
- - "**** Simulation run 149, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.741[24,100] 761ns |-----------------------------------------L0.741-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 761ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 761ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 761ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 761ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 761ns 192kb |-----L0.?------| "
- - "**** Simulation run 150, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.742[24,100] 762ns |-----------------------------------------L0.742-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 762ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 762ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 762ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 762ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 762ns 192kb |-----L0.?------| "
- - "**** Simulation run 151, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.743[24,100] 763ns |-----------------------------------------L0.743-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 763ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 763ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 763ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 763ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 763ns 192kb |-----L0.?------| "
- - "**** Simulation run 152, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.744[24,100] 764ns |-----------------------------------------L0.744-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 764ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 764ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 764ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 764ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 764ns 192kb |-----L0.?------| "
- - "**** Simulation run 153, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.745[24,100] 765ns |-----------------------------------------L0.745-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 765ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 765ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 765ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 765ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 765ns 192kb |-----L0.?------| "
- - "**** Simulation run 154, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.746[24,100] 766ns |-----------------------------------------L0.746-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 766ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 766ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 766ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 766ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 766ns 192kb |-----L0.?------| "
- - "**** Simulation run 155, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.747[24,100] 767ns |-----------------------------------------L0.747-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 767ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 767ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 767ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 767ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 767ns 192kb |-----L0.?------| "
- - "**** Simulation run 156, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.748[24,100] 768ns |-----------------------------------------L0.748-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 768ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 768ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 768ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 768ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 768ns 192kb |-----L0.?------| "
- - "**** Simulation run 157, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.749[24,100] 769ns |-----------------------------------------L0.749-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 769ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 769ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 769ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 769ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 769ns 192kb |-----L0.?------| "
- - "**** Simulation run 158, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.750[24,100] 770ns |-----------------------------------------L0.750-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 770ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 770ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 770ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 770ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 770ns 192kb |-----L0.?------| "
- - "**** Simulation run 159, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.751[24,100] 771ns |-----------------------------------------L0.751-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 771ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 771ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 771ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 771ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 771ns 192kb |-----L0.?------| "
- - "**** Simulation run 160, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.752[24,100] 772ns |-----------------------------------------L0.752-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 772ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 772ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 772ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 772ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 772ns 192kb |-----L0.?------| "
- - "**** Simulation run 161, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.753[24,100] 773ns |-----------------------------------------L0.753-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 773ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 773ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 773ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 773ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 773ns 192kb |-----L0.?------| "
- - "**** Simulation run 162, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.754[24,100] 774ns |-----------------------------------------L0.754-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 774ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 774ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 774ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 774ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 774ns 192kb |-----L0.?------| "
- - "**** Simulation run 163, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.755[24,100] 775ns |-----------------------------------------L0.755-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 775ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 775ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 775ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 775ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 775ns 192kb |-----L0.?------| "
- - "**** Simulation run 164, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.758[24,100] 778ns |-----------------------------------------L0.758-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 778ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 778ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 778ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 778ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 778ns 192kb |-----L0.?------| "
- - "**** Simulation run 165, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.759[24,100] 779ns |-----------------------------------------L0.759-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 779ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 779ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 779ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 779ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 779ns 192kb |-----L0.?------| "
- - "**** Simulation run 166, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.760[24,100] 780ns |-----------------------------------------L0.760-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 780ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 780ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 780ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 780ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 780ns 192kb |-----L0.?------| "
- - "**** Simulation run 167, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.761[24,100] 781ns |-----------------------------------------L0.761-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 781ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 781ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 781ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 781ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 781ns 192kb |-----L0.?------| "
- - "**** Simulation run 168, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.762[24,100] 782ns |-----------------------------------------L0.762-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 782ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 782ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 782ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 782ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 782ns 192kb |-----L0.?------| "
- - "**** Simulation run 169, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.763[24,100] 783ns |-----------------------------------------L0.763-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 783ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 783ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 783ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 783ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 783ns 192kb |-----L0.?------| "
- - "**** Simulation run 170, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.764[24,100] 784ns |-----------------------------------------L0.764-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 784ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 784ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 784ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 784ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 784ns 192kb |-----L0.?------| "
- - "**** Simulation run 171, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.765[24,100] 785ns |-----------------------------------------L0.765-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 785ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 785ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 785ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 785ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 785ns 192kb |-----L0.?------| "
- - "**** Simulation run 172, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.756[24,100] 776ns |-----------------------------------------L0.756-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 776ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 776ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 776ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 776ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 776ns 192kb |-----L0.?------| "
- - "**** Simulation run 173, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.757[24,100] 777ns |-----------------------------------------L0.757-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 777ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 777ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 777ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 777ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 777ns 192kb |-----L0.?------| "
- - "**** Simulation run 174, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.766[24,100] 786ns |-----------------------------------------L0.766-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 786ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 786ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 786ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 786ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 786ns 192kb |-----L0.?------| "
- - "**** Simulation run 175, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.767[24,100] 787ns |-----------------------------------------L0.767-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 787ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 787ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 787ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 787ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 787ns 192kb |-----L0.?------| "
- - "**** Simulation run 176, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.768[24,100] 788ns |-----------------------------------------L0.768-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 788ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 788ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 788ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 788ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 788ns 192kb |-----L0.?------| "
- - "**** Simulation run 177, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.769[24,100] 789ns |-----------------------------------------L0.769-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 789ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 789ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 789ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 789ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 789ns 192kb |-----L0.?------| "
- - "**** Simulation run 178, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.770[24,100] 790ns |-----------------------------------------L0.770-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 790ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 790ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 790ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 790ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 790ns 192kb |-----L0.?------| "
- - "**** Simulation run 179, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.771[24,100] 791ns |-----------------------------------------L0.771-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 791ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 791ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 791ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 791ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 791ns 192kb |-----L0.?------| "
- - "**** Simulation run 180, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.772[24,100] 792ns |-----------------------------------------L0.772-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 792ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 792ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 792ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 792ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 792ns 192kb |-----L0.?------| "
- - "**** Simulation run 181, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.773[24,100] 793ns |-----------------------------------------L0.773-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 793ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 793ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 793ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 793ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 793ns 192kb |-----L0.?------| "
- - "**** Simulation run 182, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.774[24,100] 794ns |-----------------------------------------L0.774-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 794ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 794ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 794ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 794ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 794ns 192kb |-----L0.?------| "
- - "**** Simulation run 183, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.775[24,100] 795ns |-----------------------------------------L0.775-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 795ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 795ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 795ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 795ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 795ns 192kb |-----L0.?------| "
- - "**** Simulation run 184, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.776[24,100] 796ns |-----------------------------------------L0.776-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 796ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 796ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 796ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 796ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 796ns 192kb |-----L0.?------| "
- - "**** Simulation run 185, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.777[24,100] 797ns |-----------------------------------------L0.777-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 797ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 797ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 797ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 797ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 797ns 192kb |-----L0.?------| "
- - "**** Simulation run 186, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.778[24,100] 798ns |-----------------------------------------L0.778-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 798ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 798ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 798ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 798ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 798ns 192kb |-----L0.?------| "
- - "**** Simulation run 187, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.779[24,100] 799ns |-----------------------------------------L0.779-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 799ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 799ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 799ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 799ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 799ns 192kb |-----L0.?------| "
- - "**** Simulation run 188, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.780[24,100] 800ns |-----------------------------------------L0.780-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 800ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 800ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 800ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 800ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 800ns 192kb |-----L0.?------| "
- - "**** Simulation run 189, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.781[24,100] 801ns |-----------------------------------------L0.781-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 801ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 801ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 801ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 801ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 801ns 192kb |-----L0.?------| "
- - "**** Simulation run 190, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.782[24,100] 802ns |-----------------------------------------L0.782-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 802ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 802ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 802ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 802ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 802ns 192kb |-----L0.?------| "
- - "**** Simulation run 191, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.783[24,100] 803ns |-----------------------------------------L0.783-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 803ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 803ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 803ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 803ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 803ns 192kb |-----L0.?------| "
- - "**** Simulation run 192, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.784[24,100] 804ns |-----------------------------------------L0.784-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 804ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 804ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 804ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 804ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 804ns 192kb |-----L0.?------| "
- - "**** Simulation run 193, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.785[24,100] 805ns |-----------------------------------------L0.785-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 805ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 805ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 805ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 805ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 805ns 192kb |-----L0.?------| "
- - "**** Simulation run 194, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.786[24,100] 806ns |-----------------------------------------L0.786-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 806ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 806ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 806ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 806ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 806ns 192kb |-----L0.?------| "
- - "**** Simulation run 195, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.787[24,100] 807ns |-----------------------------------------L0.787-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 807ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 807ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 807ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 807ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 807ns 192kb |-----L0.?------| "
- - "**** Simulation run 196, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.788[24,100] 808ns |-----------------------------------------L0.788-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 808ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 808ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 808ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 808ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 808ns 192kb |-----L0.?------| "
- - "**** Simulation run 197, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.789[24,100] 809ns |-----------------------------------------L0.789-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 809ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 809ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 809ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 809ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 809ns 192kb |-----L0.?------| "
- - "**** Simulation run 198, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.790[24,100] 810ns |-----------------------------------------L0.790-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 810ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 810ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 810ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 810ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 810ns 192kb |-----L0.?------| "
- - "**** Simulation run 199, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.791[24,100] 811ns |-----------------------------------------L0.791-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 811ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 811ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 811ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 811ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 811ns 192kb |-----L0.?------| "
- - "**** Simulation run 200, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.792[24,100] 812ns |-----------------------------------------L0.792-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 812ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 812ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 812ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 812ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 812ns 192kb |-----L0.?------| "
- - "**** Simulation run 201, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.793[24,100] 813ns |-----------------------------------------L0.793-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 813ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 813ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 813ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 813ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 813ns 192kb |-----L0.?------| "
- - "**** Simulation run 202, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.794[24,100] 814ns |-----------------------------------------L0.794-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 814ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 814ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 814ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 814ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 814ns 192kb |-----L0.?------| "
- - "**** Simulation run 203, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.795[24,100] 815ns |-----------------------------------------L0.795-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 815ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 815ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 815ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 815ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 815ns 192kb |-----L0.?------| "
- - "**** Simulation run 204, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.796[24,100] 816ns |-----------------------------------------L0.796-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 816ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 816ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 816ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 816ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 816ns 192kb |-----L0.?------| "
- - "**** Simulation run 205, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.797[24,100] 817ns |-----------------------------------------L0.797-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 817ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 817ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 817ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 817ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 817ns 192kb |-----L0.?------| "
- - "**** Simulation run 206, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.798[24,100] 818ns |-----------------------------------------L0.798-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 818ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 818ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 818ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 818ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 818ns 192kb |-----L0.?------| "
- - "**** Simulation run 207, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.799[24,100] 819ns |-----------------------------------------L0.799-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 819ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 819ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 819ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 819ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 819ns 192kb |-----L0.?------| "
- - "**** Simulation run 208, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.800[24,100] 820ns |-----------------------------------------L0.800-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 820ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 820ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 820ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 820ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 820ns 192kb |-----L0.?------| "
- - "**** Simulation run 209, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.801[24,100] 821ns |-----------------------------------------L0.801-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 821ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 821ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 821ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 821ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 821ns 192kb |-----L0.?------| "
- - "**** Simulation run 210, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.802[24,100] 822ns |-----------------------------------------L0.802-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 822ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 822ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 822ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 822ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 822ns 192kb |-----L0.?------| "
- - "**** Simulation run 211, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.803[24,100] 823ns |-----------------------------------------L0.803-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 823ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 823ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 823ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 823ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 823ns 192kb |-----L0.?------| "
- - "**** Simulation run 212, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.804[24,100] 824ns |-----------------------------------------L0.804-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 824ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 824ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 824ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 824ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 824ns 192kb |-----L0.?------| "
- - "**** Simulation run 213, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.805[24,100] 825ns |-----------------------------------------L0.805-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 825ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 825ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 825ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 825ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 825ns 192kb |-----L0.?------| "
- - "**** Simulation run 214, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.806[24,100] 826ns |-----------------------------------------L0.806-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 826ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 826ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 826ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 826ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 826ns 192kb |-----L0.?------| "
- - "**** Simulation run 215, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.807[24,100] 827ns |-----------------------------------------L0.807-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 827ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 827ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 827ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 827ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 827ns 192kb |-----L0.?------| "
- - "**** Simulation run 216, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.808[24,100] 828ns |-----------------------------------------L0.808-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 828ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 828ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 828ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 828ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 828ns 192kb |-----L0.?------| "
- - "**** Simulation run 217, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.809[24,100] 829ns |-----------------------------------------L0.809-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 829ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 829ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 829ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 829ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 829ns 192kb |-----L0.?------| "
- - "**** Simulation run 218, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.810[24,100] 830ns |-----------------------------------------L0.810-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 830ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 830ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 830ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 830ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 830ns 192kb |-----L0.?------| "
- - "**** Simulation run 219, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.811[24,100] 831ns |-----------------------------------------L0.811-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 831ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 831ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 831ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 831ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 831ns 192kb |-----L0.?------| "
- - "**** Simulation run 220, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.812[24,100] 832ns |-----------------------------------------L0.812-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 832ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 832ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 832ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 832ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 832ns 192kb |-----L0.?------| "
- - "**** Simulation run 221, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.813[24,100] 833ns |-----------------------------------------L0.813-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 833ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 833ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 833ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 833ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 833ns 192kb |-----L0.?------| "
- - "**** Simulation run 222, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.814[24,100] 834ns |-----------------------------------------L0.814-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 834ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 834ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 834ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 834ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 834ns 192kb |-----L0.?------| "
- - "**** Simulation run 223, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.815[24,100] 835ns |-----------------------------------------L0.815-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 835ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 835ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 835ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 835ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 835ns 192kb |-----L0.?------| "
- - "**** Simulation run 224, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.816[24,100] 836ns |-----------------------------------------L0.816-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 836ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 836ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 836ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 836ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 836ns 192kb |-----L0.?------| "
- - "**** Simulation run 225, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.817[24,100] 837ns |-----------------------------------------L0.817-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 837ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 837ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 837ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 837ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 837ns 192kb |-----L0.?------| "
- - "**** Simulation run 226, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.818[24,100] 838ns |-----------------------------------------L0.818-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 838ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 838ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 838ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 838ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 838ns 192kb |-----L0.?------| "
- - "**** Simulation run 227, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.819[24,100] 839ns |-----------------------------------------L0.819-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 839ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 839ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 839ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 839ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 839ns 192kb |-----L0.?------| "
- - "**** Simulation run 228, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.820[24,100] 840ns |-----------------------------------------L0.820-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 840ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 840ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 840ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 840ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 840ns 192kb |-----L0.?------| "
- - "**** Simulation run 229, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.821[24,100] 841ns |-----------------------------------------L0.821-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 841ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 841ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 841ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 841ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 841ns 192kb |-----L0.?------| "
- - "**** Simulation run 230, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.822[24,100] 842ns |-----------------------------------------L0.822-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 842ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 842ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 842ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 842ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 842ns 192kb |-----L0.?------| "
- - "**** Simulation run 231, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.823[24,100] 843ns |-----------------------------------------L0.823-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 843ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 843ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 843ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 843ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 843ns 192kb |-----L0.?------| "
- - "**** Simulation run 232, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.824[24,100] 844ns |-----------------------------------------L0.824-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 844ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 844ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 844ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 844ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 844ns 192kb |-----L0.?------| "
- - "**** Simulation run 233, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.825[24,100] 845ns |-----------------------------------------L0.825-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 845ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 845ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 845ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 845ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 845ns 192kb |-----L0.?------| "
- - "**** Simulation run 234, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.826[24,100] 846ns |-----------------------------------------L0.826-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 846ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 846ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 846ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 846ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 846ns 192kb |-----L0.?------| "
- - "**** Simulation run 235, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.827[24,100] 847ns |-----------------------------------------L0.827-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 847ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 847ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 847ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 847ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 847ns 192kb |-----L0.?------| "
- - "**** Simulation run 236, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.828[24,100] 848ns |-----------------------------------------L0.828-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 848ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 848ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 848ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 848ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 848ns 192kb |-----L0.?------| "
- - "**** Simulation run 237, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.829[24,100] 849ns |-----------------------------------------L0.829-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 849ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 849ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 849ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 849ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 849ns 192kb |-----L0.?------| "
- - "**** Simulation run 238, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.830[24,100] 850ns |-----------------------------------------L0.830-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 850ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 850ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 850ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 850ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 850ns 192kb |-----L0.?------| "
- - "**** Simulation run 239, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.831[24,100] 851ns |-----------------------------------------L0.831-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 851ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 851ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 851ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 851ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 851ns 192kb |-----L0.?------| "
- - "**** Simulation run 240, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.832[24,100] 852ns |-----------------------------------------L0.832-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 852ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 852ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 852ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 852ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 852ns 192kb |-----L0.?------| "
- - "**** Simulation run 241, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.833[24,100] 853ns |-----------------------------------------L0.833-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 853ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 853ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 853ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 853ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 853ns 192kb |-----L0.?------| "
- - "**** Simulation run 242, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.834[24,100] 854ns |-----------------------------------------L0.834-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 854ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 854ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 854ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 854ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 854ns 192kb |-----L0.?------| "
- - "**** Simulation run 243, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.835[24,100] 855ns |-----------------------------------------L0.835-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 855ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 855ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 855ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 855ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 855ns 192kb |-----L0.?------| "
- - "**** Simulation run 244, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.836[24,100] 856ns |-----------------------------------------L0.836-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 856ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 856ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 856ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 856ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 856ns 192kb |-----L0.?------| "
- - "**** Simulation run 245, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.837[24,100] 857ns |-----------------------------------------L0.837-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 857ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 857ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 857ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 857ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 857ns 192kb |-----L0.?------| "
- - "**** Simulation run 246, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.838[24,100] 858ns |-----------------------------------------L0.838-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 858ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 858ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 858ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 858ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 858ns 192kb |-----L0.?------| "
- - "**** Simulation run 247, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.839[24,100] 859ns |-----------------------------------------L0.839-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 859ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 859ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 859ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 859ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 859ns 192kb |-----L0.?------| "
- - "**** Simulation run 248, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.840[24,100] 860ns |-----------------------------------------L0.840-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 860ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 860ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 860ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 860ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 860ns 192kb |-----L0.?------| "
- - "**** Simulation run 249, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.841[24,100] 861ns |-----------------------------------------L0.841-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 861ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 861ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 861ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 861ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 861ns 192kb |-----L0.?------| "
- - "**** Simulation run 250, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.842[24,100] 862ns |-----------------------------------------L0.842-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 862ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 862ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 862ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 862ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 862ns 192kb |-----L0.?------| "
- - "**** Simulation run 251, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.843[24,100] 863ns |-----------------------------------------L0.843-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 863ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 863ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 863ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 863ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 863ns 192kb |-----L0.?------| "
- - "**** Simulation run 252, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.844[24,100] 864ns |-----------------------------------------L0.844-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 864ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 864ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 864ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 864ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 864ns 192kb |-----L0.?------| "
- - "**** Simulation run 253, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.845[24,100] 865ns |-----------------------------------------L0.845-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 865ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 865ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 865ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 865ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 865ns 192kb |-----L0.?------| "
- - "**** Simulation run 254, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.846[24,100] 866ns |-----------------------------------------L0.846-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 866ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 866ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 866ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 866ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 866ns 192kb |-----L0.?------| "
- - "**** Simulation run 255, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.847[24,100] 867ns |-----------------------------------------L0.847-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 867ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 867ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 867ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 867ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 867ns 192kb |-----L0.?------| "
- - "**** Simulation run 256, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.848[24,100] 868ns |-----------------------------------------L0.848-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 868ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 868ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 868ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 868ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 868ns 192kb |-----L0.?------| "
- - "**** Simulation run 257, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.849[24,100] 869ns |-----------------------------------------L0.849-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 869ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 869ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 869ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 869ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 869ns 192kb |-----L0.?------| "
- - "**** Simulation run 258, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.850[24,100] 870ns |-----------------------------------------L0.850-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 870ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 870ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 870ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 870ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 870ns 192kb |-----L0.?------| "
- - "**** Simulation run 259, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.851[24,100] 871ns |-----------------------------------------L0.851-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 871ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 871ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 871ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 871ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 871ns 192kb |-----L0.?------| "
- - "**** Simulation run 260, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.852[24,100] 872ns |-----------------------------------------L0.852-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 872ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 872ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 872ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 872ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 872ns 192kb |-----L0.?------| "
- - "**** Simulation run 261, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.853[24,100] 873ns |-----------------------------------------L0.853-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 873ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 873ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 873ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 873ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 873ns 192kb |-----L0.?------| "
- - "**** Simulation run 262, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.854[24,100] 874ns |-----------------------------------------L0.854-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 874ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 874ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 874ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 874ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 874ns 192kb |-----L0.?------| "
- - "**** Simulation run 263, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.855[24,100] 875ns |-----------------------------------------L0.855-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 875ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 875ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 875ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 875ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 875ns 192kb |-----L0.?------| "
- - "**** Simulation run 264, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.856[24,100] 876ns |-----------------------------------------L0.856-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 876ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 876ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 876ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 876ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 876ns 192kb |-----L0.?------| "
- - "**** Simulation run 265, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.857[24,100] 877ns |-----------------------------------------L0.857-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 877ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 877ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 877ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 877ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 877ns 192kb |-----L0.?------| "
- - "**** Simulation run 266, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.858[24,100] 878ns |-----------------------------------------L0.858-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 878ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 878ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 878ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 878ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 878ns 192kb |-----L0.?------| "
- - "**** Simulation run 267, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.859[24,100] 879ns |-----------------------------------------L0.859-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 879ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 879ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 879ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 879ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 879ns 192kb |-----L0.?------| "
- - "**** Simulation run 268, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.860[24,100] 880ns |-----------------------------------------L0.860-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 880ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 880ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 880ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 880ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 880ns 192kb |-----L0.?------| "
- - "**** Simulation run 269, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.861[24,100] 881ns |-----------------------------------------L0.861-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 881ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 881ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 881ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 881ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 881ns 192kb |-----L0.?------| "
- - "**** Simulation run 270, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.862[24,100] 882ns |-----------------------------------------L0.862-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 882ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 882ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 882ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 882ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 882ns 192kb |-----L0.?------| "
- - "**** Simulation run 271, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.863[24,100] 883ns |-----------------------------------------L0.863-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 883ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 883ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 883ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 883ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 883ns 192kb |-----L0.?------| "
- - "**** Simulation run 272, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.864[24,100] 884ns |-----------------------------------------L0.864-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 884ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 884ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 884ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 884ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 884ns 192kb |-----L0.?------| "
- - "**** Simulation run 273, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.865[24,100] 885ns |-----------------------------------------L0.865-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 885ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 885ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 885ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 885ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 885ns 192kb |-----L0.?------| "
- - "**** Simulation run 274, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.866[24,100] 886ns |-----------------------------------------L0.866-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 886ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 886ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 886ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 886ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 886ns 192kb |-----L0.?------| "
- - "**** Simulation run 275, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.867[24,100] 887ns |-----------------------------------------L0.867-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 887ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 887ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 887ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 887ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 887ns 192kb |-----L0.?------| "
- - "**** Simulation run 276, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.868[24,100] 888ns |-----------------------------------------L0.868-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 888ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 888ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 888ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 888ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 888ns 192kb |-----L0.?------| "
- - "**** Simulation run 277, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.869[24,100] 889ns |-----------------------------------------L0.869-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 889ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 889ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 889ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 889ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 889ns 192kb |-----L0.?------| "
- - "**** Simulation run 278, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.870[24,100] 890ns |-----------------------------------------L0.870-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 890ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 890ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 890ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 890ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 890ns 192kb |-----L0.?------| "
- - "**** Simulation run 279, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.871[24,100] 891ns |-----------------------------------------L0.871-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 891ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 891ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 891ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 891ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 891ns 192kb |-----L0.?------| "
- - "**** Simulation run 280, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.872[24,100] 892ns |-----------------------------------------L0.872-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 892ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 892ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 892ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 892ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 892ns 192kb |-----L0.?------| "
- - "**** Simulation run 281, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.873[24,100] 893ns |-----------------------------------------L0.873-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 893ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 893ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 893ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 893ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 893ns 192kb |-----L0.?------| "
- - "**** Simulation run 282, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.874[24,100] 894ns |-----------------------------------------L0.874-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 894ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 894ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 894ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 894ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 894ns 192kb |-----L0.?------| "
- - "**** Simulation run 283, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.875[24,100] 895ns |-----------------------------------------L0.875-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 895ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 895ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 895ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 895ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 895ns 192kb |-----L0.?------| "
- - "**** Simulation run 284, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.876[24,100] 896ns |-----------------------------------------L0.876-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 896ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 896ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 896ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 896ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 896ns 192kb |-----L0.?------| "
- - "**** Simulation run 285, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.877[24,100] 897ns |-----------------------------------------L0.877-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 897ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 897ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 897ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 897ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 897ns 192kb |-----L0.?------| "
- - "**** Simulation run 286, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.878[24,100] 898ns |-----------------------------------------L0.878-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 898ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 898ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 898ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 898ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 898ns 192kb |-----L0.?------| "
- - "**** Simulation run 287, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.879[24,100] 899ns |-----------------------------------------L0.879-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 899ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 899ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 899ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 899ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 899ns 192kb |-----L0.?------| "
- - "**** Simulation run 288, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.880[24,100] 900ns |-----------------------------------------L0.880-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 900ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 900ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 900ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 900ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 900ns 192kb |-----L0.?------| "
- - "**** Simulation run 289, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.881[24,100] 901ns |-----------------------------------------L0.881-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 901ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 901ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 901ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 901ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 901ns 192kb |-----L0.?------| "
- - "**** Simulation run 290, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.882[24,100] 902ns |-----------------------------------------L0.882-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 902ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 902ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 902ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 902ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 902ns 192kb |-----L0.?------| "
- - "**** Simulation run 291, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.883[24,100] 903ns |-----------------------------------------L0.883-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 903ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 903ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 903ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 903ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 903ns 192kb |-----L0.?------| "
- - "**** Simulation run 292, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.886[24,100] 906ns |-----------------------------------------L0.886-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 906ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 906ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 906ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 906ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 906ns 192kb |-----L0.?------| "
- - "**** Simulation run 293, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.887[24,100] 907ns |-----------------------------------------L0.887-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 907ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 907ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 907ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 907ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 907ns 192kb |-----L0.?------| "
- - "**** Simulation run 294, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.888[24,100] 908ns |-----------------------------------------L0.888-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 908ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 908ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 908ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 908ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 908ns 192kb |-----L0.?------| "
- - "**** Simulation run 295, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.889[24,100] 909ns |-----------------------------------------L0.889-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 909ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 909ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 909ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 909ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 909ns 192kb |-----L0.?------| "
- - "**** Simulation run 296, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.890[24,100] 910ns |-----------------------------------------L0.890-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 910ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 910ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 910ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 910ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 910ns 192kb |-----L0.?------| "
- - "**** Simulation run 297, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.891[24,100] 911ns |-----------------------------------------L0.891-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 911ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 911ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 911ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 911ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 911ns 192kb |-----L0.?------| "
- - "**** Simulation run 298, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.892[24,100] 912ns |-----------------------------------------L0.892-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 912ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 912ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 912ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 912ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 912ns 192kb |-----L0.?------| "
- - "**** Simulation run 299, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.893[24,100] 913ns |-----------------------------------------L0.893-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 913ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 913ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 913ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 913ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 913ns 192kb |-----L0.?------| "
- - "**** Simulation run 300, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.884[24,100] 904ns |-----------------------------------------L0.884-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 904ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 904ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 904ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 904ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 904ns 192kb |-----L0.?------| "
- - "**** Simulation run 301, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.885[24,100] 905ns |-----------------------------------------L0.885-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 905ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 905ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 905ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 905ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 905ns 192kb |-----L0.?------| "
- - "**** Simulation run 302, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.894[24,100] 914ns |-----------------------------------------L0.894-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 914ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 914ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 914ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 914ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 914ns 192kb |-----L0.?------| "
- - "**** Simulation run 303, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.895[24,100] 915ns |-----------------------------------------L0.895-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 915ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 915ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 915ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 915ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 915ns 192kb |-----L0.?------| "
- - "**** Simulation run 304, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.896[24,100] 916ns |-----------------------------------------L0.896-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 916ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 916ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 916ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 916ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 916ns 192kb |-----L0.?------| "
- - "**** Simulation run 305, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.897[24,100] 917ns |-----------------------------------------L0.897-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 917ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 917ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 917ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 917ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 917ns 192kb |-----L0.?------| "
- - "**** Simulation run 306, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.898[24,100] 918ns |-----------------------------------------L0.898-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 918ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 918ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 918ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 918ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 918ns 192kb |-----L0.?------| "
- - "**** Simulation run 307, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.899[24,100] 919ns |-----------------------------------------L0.899-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 919ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 919ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 919ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 919ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 919ns 192kb |-----L0.?------| "
- - "**** Simulation run 308, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.900[24,100] 920ns |-----------------------------------------L0.900-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 920ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 920ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 920ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 920ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 920ns 192kb |-----L0.?------| "
- - "**** Simulation run 309, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.901[24,100] 921ns |-----------------------------------------L0.901-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 921ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 921ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 921ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 921ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 921ns 192kb |-----L0.?------| "
- - "**** Simulation run 310, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.902[24,100] 922ns |-----------------------------------------L0.902-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 922ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 922ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 922ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 922ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 922ns 192kb |-----L0.?------| "
- - "**** Simulation run 311, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.903[24,100] 923ns |-----------------------------------------L0.903-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 923ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 923ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 923ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 923ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 923ns 192kb |-----L0.?------| "
- - "**** Simulation run 312, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.904[24,100] 924ns |-----------------------------------------L0.904-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 924ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 924ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 924ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 924ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 924ns 192kb |-----L0.?------| "
- - "**** Simulation run 313, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.905[24,100] 925ns |-----------------------------------------L0.905-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 925ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 925ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 925ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 925ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 925ns 192kb |-----L0.?------| "
- - "**** Simulation run 314, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.906[24,100] 926ns |-----------------------------------------L0.906-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 926ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 926ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 926ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 926ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 926ns 192kb |-----L0.?------| "
- - "**** Simulation run 315, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.907[24,100] 927ns |-----------------------------------------L0.907-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 927ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 927ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 927ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 927ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 927ns 192kb |-----L0.?------| "
- - "**** Simulation run 316, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.908[24,100] 928ns |-----------------------------------------L0.908-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 928ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 928ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 928ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 928ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 928ns 192kb |-----L0.?------| "
- - "**** Simulation run 317, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.909[24,100] 929ns |-----------------------------------------L0.909-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 929ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 929ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 929ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 929ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 929ns 192kb |-----L0.?------| "
- - "**** Simulation run 318, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.910[24,100] 930ns |-----------------------------------------L0.910-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 930ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 930ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 930ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 930ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 930ns 192kb |-----L0.?------| "
- - "**** Simulation run 319, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.911[24,100] 931ns |-----------------------------------------L0.911-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 931ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 931ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 931ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 931ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 931ns 192kb |-----L0.?------| "
- - "**** Simulation run 320, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.912[24,100] 932ns |-----------------------------------------L0.912-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 932ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 932ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 932ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 932ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 932ns 192kb |-----L0.?------| "
- - "**** Simulation run 321, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.913[24,100] 933ns |-----------------------------------------L0.913-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 933ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 933ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 933ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 933ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 933ns 192kb |-----L0.?------| "
- - "**** Simulation run 322, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.914[24,100] 934ns |-----------------------------------------L0.914-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 934ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 934ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 934ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 934ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 934ns 192kb |-----L0.?------| "
- - "**** Simulation run 323, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.915[24,100] 935ns |-----------------------------------------L0.915-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 935ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 935ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 935ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 935ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 935ns 192kb |-----L0.?------| "
- - "**** Simulation run 324, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.916[24,100] 936ns |-----------------------------------------L0.916-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 936ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 936ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 936ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 936ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 936ns 192kb |-----L0.?------| "
- - "**** Simulation run 325, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.917[24,100] 937ns |-----------------------------------------L0.917-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 937ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 937ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 937ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 937ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 937ns 192kb |-----L0.?------| "
- - "**** Simulation run 326, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.918[24,100] 938ns |-----------------------------------------L0.918-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 938ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 938ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 938ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 938ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 938ns 192kb |-----L0.?------| "
- - "**** Simulation run 327, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.919[24,100] 939ns |-----------------------------------------L0.919-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 939ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 939ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 939ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 939ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 939ns 192kb |-----L0.?------| "
- - "**** Simulation run 328, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.920[24,100] 940ns |-----------------------------------------L0.920-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 940ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 940ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 940ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 940ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 940ns 192kb |-----L0.?------| "
- - "**** Simulation run 329, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.921[24,100] 941ns |-----------------------------------------L0.921-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 941ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 941ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 941ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 941ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 941ns 192kb |-----L0.?------| "
- - "**** Simulation run 330, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.922[24,100] 942ns |-----------------------------------------L0.922-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 942ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 942ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 942ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 942ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 942ns 192kb |-----L0.?------| "
- - "**** Simulation run 331, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.923[24,100] 943ns |-----------------------------------------L0.923-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 943ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 943ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 943ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 943ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 943ns 192kb |-----L0.?------| "
- - "**** Simulation run 332, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.924[24,100] 944ns |-----------------------------------------L0.924-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 944ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 944ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 944ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 944ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 944ns 192kb |-----L0.?------| "
- - "**** Simulation run 333, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.925[24,100] 945ns |-----------------------------------------L0.925-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 945ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 945ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 945ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 945ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 945ns 192kb |-----L0.?------| "
- - "**** Simulation run 334, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.926[24,100] 946ns |-----------------------------------------L0.926-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 946ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 946ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 946ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 946ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 946ns 192kb |-----L0.?------| "
- - "**** Simulation run 335, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.927[24,100] 947ns |-----------------------------------------L0.927-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 947ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 947ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 947ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 947ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 947ns 192kb |-----L0.?------| "
- - "**** Simulation run 336, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.928[24,100] 948ns |-----------------------------------------L0.928-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 948ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 948ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 948ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 948ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 948ns 192kb |-----L0.?------| "
- - "**** Simulation run 337, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.929[24,100] 949ns |-----------------------------------------L0.929-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 949ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 949ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 949ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 949ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 949ns 192kb |-----L0.?------| "
- - "**** Simulation run 338, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.930[24,100] 950ns |-----------------------------------------L0.930-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 950ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 950ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 950ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 950ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 950ns 192kb |-----L0.?------| "
- - "**** Simulation run 339, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.931[24,100] 951ns |-----------------------------------------L0.931-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 951ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 951ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 951ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 951ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 951ns 192kb |-----L0.?------| "
- - "**** Simulation run 340, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.932[24,100] 952ns |-----------------------------------------L0.932-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 952ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 952ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 952ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 952ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 952ns 192kb |-----L0.?------| "
- - "**** Simulation run 341, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.933[24,100] 953ns |-----------------------------------------L0.933-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 953ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 953ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 953ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 953ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 953ns 192kb |-----L0.?------| "
- - "**** Simulation run 342, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.934[24,100] 954ns |-----------------------------------------L0.934-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 954ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 954ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 954ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 954ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 954ns 192kb |-----L0.?------| "
- - "**** Simulation run 343, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.935[24,100] 955ns |-----------------------------------------L0.935-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 955ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 955ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 955ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 955ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 955ns 192kb |-----L0.?------| "
- - "**** Simulation run 344, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.936[24,100] 956ns |-----------------------------------------L0.936-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 956ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 956ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 956ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 956ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 956ns 192kb |-----L0.?------| "
- - "**** Simulation run 345, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.937[24,100] 957ns |-----------------------------------------L0.937-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 957ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 957ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 957ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 957ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 957ns 192kb |-----L0.?------| "
- - "**** Simulation run 346, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.938[24,100] 958ns |-----------------------------------------L0.938-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 958ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 958ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 958ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 958ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 958ns 192kb |-----L0.?------| "
- - "**** Simulation run 347, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.939[24,100] 959ns |-----------------------------------------L0.939-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 959ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 959ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 959ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 959ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 959ns 192kb |-----L0.?------| "
- - "**** Simulation run 348, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.940[24,100] 960ns |-----------------------------------------L0.940-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 960ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 960ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 960ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 960ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 960ns 192kb |-----L0.?------| "
- - "**** Simulation run 349, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.941[24,100] 961ns |-----------------------------------------L0.941-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 961ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 961ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 961ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 961ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 961ns 192kb |-----L0.?------| "
- - "**** Simulation run 350, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.942[24,100] 962ns |-----------------------------------------L0.942-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 962ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 962ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 962ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 962ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 962ns 192kb |-----L0.?------| "
- - "**** Simulation run 351, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.943[24,100] 963ns |-----------------------------------------L0.943-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 963ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 963ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 963ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 963ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 963ns 192kb |-----L0.?------| "
- - "**** Simulation run 352, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.944[24,100] 964ns |-----------------------------------------L0.944-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 964ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 964ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 964ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 964ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 964ns 192kb |-----L0.?------| "
- - "**** Simulation run 353, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.945[24,100] 965ns |-----------------------------------------L0.945-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 965ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 965ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 965ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 965ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 965ns 192kb |-----L0.?------| "
- - "**** Simulation run 354, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.946[24,100] 966ns |-----------------------------------------L0.946-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 966ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 966ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 966ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 966ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 966ns 192kb |-----L0.?------| "
- - "**** Simulation run 355, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.947[24,100] 967ns |-----------------------------------------L0.947-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 967ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 967ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 967ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 967ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 967ns 192kb |-----L0.?------| "
- - "**** Simulation run 356, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.948[24,100] 968ns |-----------------------------------------L0.948-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 968ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 968ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 968ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 968ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 968ns 192kb |-----L0.?------| "
- - "**** Simulation run 357, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.949[24,100] 969ns |-----------------------------------------L0.949-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 969ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 969ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 969ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 969ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 969ns 192kb |-----L0.?------| "
- - "**** Simulation run 358, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.950[24,100] 970ns |-----------------------------------------L0.950-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 970ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 970ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 970ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 970ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 970ns 192kb |-----L0.?------| "
- - "**** Simulation run 359, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.951[24,100] 971ns |-----------------------------------------L0.951-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 971ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 971ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 971ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 971ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 971ns 192kb |-----L0.?------| "
- - "**** Simulation run 360, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.952[24,100] 972ns |-----------------------------------------L0.952-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 972ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 972ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 972ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 972ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 972ns 192kb |-----L0.?------| "
- - "**** Simulation run 361, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.953[24,100] 973ns |-----------------------------------------L0.953-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 973ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 973ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 973ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 973ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 973ns 192kb |-----L0.?------| "
- - "**** Simulation run 362, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.954[24,100] 974ns |-----------------------------------------L0.954-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 974ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 974ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 974ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 974ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 974ns 192kb |-----L0.?------| "
- - "**** Simulation run 363, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.955[24,100] 975ns |-----------------------------------------L0.955-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 975ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 975ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 975ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 975ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 975ns 192kb |-----L0.?------| "
- - "**** Simulation run 364, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.956[24,100] 976ns |-----------------------------------------L0.956-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 976ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 976ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 976ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 976ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 976ns 192kb |-----L0.?------| "
- - "**** Simulation run 365, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.957[24,100] 977ns |-----------------------------------------L0.957-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 977ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 977ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 977ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 977ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 977ns 192kb |-----L0.?------| "
- - "**** Simulation run 366, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.958[24,100] 978ns |-----------------------------------------L0.958-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 978ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 978ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 978ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 978ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 978ns 192kb |-----L0.?------| "
- - "**** Simulation run 367, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.959[24,100] 979ns |-----------------------------------------L0.959-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 979ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 979ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 979ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 979ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 979ns 192kb |-----L0.?------| "
- - "**** Simulation run 368, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.960[24,100] 980ns |-----------------------------------------L0.960-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 980ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 980ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 980ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 980ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 980ns 192kb |-----L0.?------| "
- - "**** Simulation run 369, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.961[24,100] 981ns |-----------------------------------------L0.961-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 981ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 981ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 981ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 981ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 981ns 192kb |-----L0.?------| "
- - "**** Simulation run 370, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.962[24,100] 982ns |-----------------------------------------L0.962-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 982ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 982ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 982ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 982ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 982ns 192kb |-----L0.?------| "
- - "**** Simulation run 371, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.963[24,100] 983ns |-----------------------------------------L0.963-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 983ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 983ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 983ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 983ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 983ns 192kb |-----L0.?------| "
- - "**** Simulation run 372, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.964[24,100] 984ns |-----------------------------------------L0.964-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 984ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 984ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 984ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 984ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 984ns 192kb |-----L0.?------| "
- - "**** Simulation run 373, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.965[24,100] 985ns |-----------------------------------------L0.965-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 985ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 985ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 985ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 985ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 985ns 192kb |-----L0.?------| "
- - "**** Simulation run 374, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.966[24,100] 986ns |-----------------------------------------L0.966-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 986ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 986ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 986ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 986ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 986ns 192kb |-----L0.?------| "
- - "**** Simulation run 375, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.967[24,100] 987ns |-----------------------------------------L0.967-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 987ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 987ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 987ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 987ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 987ns 192kb |-----L0.?------| "
- - "**** Simulation run 376, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.968[24,100] 988ns |-----------------------------------------L0.968-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 988ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 988ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 988ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 988ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 988ns 192kb |-----L0.?------| "
- - "**** Simulation run 377, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.969[24,100] 989ns |-----------------------------------------L0.969-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 989ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 989ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 989ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 989ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 989ns 192kb |-----L0.?------| "
- - "**** Simulation run 378, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.970[24,100] 990ns |-----------------------------------------L0.970-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 990ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 990ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 990ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 990ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 990ns 192kb |-----L0.?------| "
- - "**** Simulation run 379, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.971[24,100] 991ns |-----------------------------------------L0.971-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 991ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 991ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 991ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 991ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 991ns 192kb |-----L0.?------| "
- - "**** Simulation run 380, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.972[24,100] 992ns |-----------------------------------------L0.972-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 992ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 992ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 992ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 992ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 992ns 192kb |-----L0.?------| "
- - "**** Simulation run 381, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.973[24,100] 993ns |-----------------------------------------L0.973-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 993ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 993ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 993ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 993ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 993ns 192kb |-----L0.?------| "
- - "**** Simulation run 382, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.974[24,100] 994ns |-----------------------------------------L0.974-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 994ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 994ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 994ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 994ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 994ns 192kb |-----L0.?------| "
- - "**** Simulation run 383, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.975[24,100] 995ns |-----------------------------------------L0.975-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 995ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 995ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 995ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 995ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 995ns 192kb |-----L0.?------| "
- - "**** Simulation run 384, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.976[24,100] 996ns |-----------------------------------------L0.976-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 996ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 996ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 996ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 996ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 996ns 192kb |-----L0.?------| "
- - "**** Simulation run 385, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.977[24,100] 997ns |-----------------------------------------L0.977-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 997ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 997ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 997ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 997ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 997ns 192kb |-----L0.?------| "
- - "**** Simulation run 386, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.978[24,100] 998ns |-----------------------------------------L0.978-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 998ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 998ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 998ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 998ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 998ns 192kb |-----L0.?------| "
- - "**** Simulation run 387, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.979[24,100] 999ns |-----------------------------------------L0.979-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 999ns 152kb |-----L0.?------| "
- - "L0.?[40,54] 999ns 141kb |-----L0.?-----| "
- - "L0.?[55,69] 999ns 141kb |-----L0.?-----| "
- - "L0.?[70,84] 999ns 141kb |-----L0.?-----| "
- - "L0.?[85,100] 999ns 192kb |-----L0.?------| "
- - "**** Simulation run 388, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.980[24,100] 1us |-----------------------------------------L0.980-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1us 192kb |-----L0.?------| "
- - "**** Simulation run 389, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.981[24,100] 1us |-----------------------------------------L0.981-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1us 192kb |-----L0.?------| "
- - "**** Simulation run 390, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.982[24,100] 1us |-----------------------------------------L0.982-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1us 192kb |-----L0.?------| "
- - "**** Simulation run 391, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.983[24,100] 1us |-----------------------------------------L0.983-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1us 192kb |-----L0.?------| "
- - "**** Simulation run 392, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.984[24,100] 1us |-----------------------------------------L0.984-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1us 192kb |-----L0.?------| "
- - "**** Simulation run 393, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.985[24,100] 1us |-----------------------------------------L0.985-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1us 192kb |-----L0.?------| "
- - "**** Simulation run 394, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.986[24,100] 1.01us |-----------------------------------------L0.986-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1.01us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1.01us 192kb |-----L0.?------| "
- - "**** Simulation run 395, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.987[24,100] 1.01us |-----------------------------------------L0.987-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1.01us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1.01us 192kb |-----L0.?------| "
- - "**** Simulation run 396, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.988[24,100] 1.01us |-----------------------------------------L0.988-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1.01us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1.01us 192kb |-----L0.?------| "
- - "**** Simulation run 397, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.989[24,100] 1.01us |-----------------------------------------L0.989-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1.01us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1.01us 192kb |-----L0.?------| "
- - "**** Simulation run 398, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.990[24,100] 1.01us |-----------------------------------------L0.990-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1.01us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1.01us 192kb |-----L0.?------| "
- - "**** Simulation run 399, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.991[24,100] 1.01us |-----------------------------------------L0.991-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1.01us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1.01us 192kb |-----L0.?------| "
- - "**** Simulation run 400, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.992[24,100] 1.01us |-----------------------------------------L0.992-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1.01us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1.01us 192kb |-----L0.?------| "
- - "**** Simulation run 401, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.993[24,100] 1.01us |-----------------------------------------L0.993-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1.01us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1.01us 192kb |-----L0.?------| "
- - "**** Simulation run 402, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.994[24,100] 1.01us |-----------------------------------------L0.994-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1.01us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1.01us 192kb |-----L0.?------| "
- - "**** Simulation run 403, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.995[24,100] 1.01us |-----------------------------------------L0.995-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1.01us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1.01us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1.01us 192kb |-----L0.?------| "
- - "**** Simulation run 404, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.996[24,100] 1.02us |-----------------------------------------L0.996-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1.02us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1.02us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1.02us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1.02us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1.02us 192kb |-----L0.?------| "
- - "**** Simulation run 405, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.997[24,100] 1.02us |-----------------------------------------L0.997-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1.02us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1.02us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1.02us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1.02us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1.02us 192kb |-----L0.?------| "
- - "**** Simulation run 406, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.998[24,100] 1.02us |-----------------------------------------L0.998-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1.02us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1.02us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1.02us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1.02us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1.02us 192kb |-----L0.?------| "
- - "**** Simulation run 407, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.999[24,100] 1.02us |-----------------------------------------L0.999-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1.02us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1.02us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1.02us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1.02us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1.02us 192kb |-----L0.?------| "
- - "**** Simulation run 408, type=split(HighL0OverlapTotalBacklog)(split_times=[39, 54, 69, 84]). 1 Input Files, 768kb total:"
- - "L0, all files 768kb "
- - "L0.1000[24,100] 1.02us |----------------------------------------L0.1000-----------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 768kb total:"
- - "L0 "
- - "L0.?[24,39] 1.02us 152kb |-----L0.?------| "
- - "L0.?[40,54] 1.02us 141kb |-----L0.?-----| "
- - "L0.?[55,69] 1.02us 141kb |-----L0.?-----| "
- - "L0.?[70,84] 1.02us 141kb |-----L0.?-----| "
- - "L0.?[85,100] 1.02us 192kb |-----L0.?------| "
- - "**** Simulation run 409, type=split(HighL0OverlapTotalBacklog)(split_times=[54]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1007[43,60] 615ns |----------------------------------------L0.1007-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[43,54] 615ns 65mb |--------------------------L0.?--------------------------| "
- - "L0.?[55,60] 615ns 35mb |----------L0.?----------| "
- - "**** Simulation run 410, type=split(HighL0OverlapTotalBacklog)(split_times=[69]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1008[61,78] 615ns |----------------------------------------L0.1008-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[61,69] 615ns 47mb |------------------L0.?------------------| "
- - "L0.?[70,78] 615ns 53mb |------------------L0.?------------------| "
- - "**** Simulation run 411, type=split(HighL0OverlapTotalBacklog)(split_times=[84]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1009[79,96] 615ns |----------------------------------------L0.1009-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[79,84] 615ns 29mb |----------L0.?----------| "
- - "L0.?[85,96] 615ns 70mb |--------------------------L0.?--------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 409 files: L0.596, L0.597, L0.598, L0.599, L0.600, L0.601, L0.602, L0.603, L0.604, L0.605, L0.606, L0.607, L0.608, L0.609, L0.610, L0.611, L0.612, L0.613, L0.614, L0.615, L0.616, L0.617, L0.618, L0.619, L0.620, L0.621, L0.622, L0.623, L0.624, L0.625, L0.626, L0.627, L0.628, L0.629, L0.630, L0.631, L0.632, L0.633, L0.634, L0.635, L0.636, L0.637, L0.638, L0.639, L0.640, L0.641, L0.642, L0.643, L0.644, L0.645, L0.646, L0.647, L0.648, L0.649, L0.650, L0.651, L0.652, L0.653, L0.654, L0.655, L0.656, L0.657, L0.658, L0.659, L0.660, L0.661, L0.662, L0.663, L0.664, L0.665, L0.666, L0.667, L0.668, L0.669, L0.670, L0.671, L0.672, L0.673, L0.674, L0.675, L0.676, L0.677, L0.678, L0.679, L0.680, L0.681, L0.682, L0.683, L0.684, L0.685, L0.686, L0.687, L0.688, L0.689, L0.690, L0.691, L0.692, L0.693, L0.694, L0.695, L0.696, L0.697, L0.698, L0.699, L0.700, L0.701, L0.702, L0.703, L0.704, L0.705, L0.706, L0.707, L0.708, L0.709, L0.710, L0.711, L0.712, L0.713, L0.714, L0.715, L0.716, L0.717, L0.718, L0.719, L0.720, L0.721, L0.722, L0.723, L0.724, L0.725, L0.726, L0.727, L0.728, L0.729, L0.730, L0.731, L0.732, L0.733, L0.734, L0.735, L0.736, L0.737, L0.738, L0.739, L0.740, L0.741, L0.742, L0.743, L0.744, L0.745, L0.746, L0.747, L0.748, L0.749, L0.750, L0.751, L0.752, L0.753, L0.754, L0.755, L0.756, L0.757, L0.758, L0.759, L0.760, L0.761, L0.762, L0.763, L0.764, L0.765, L0.766, L0.767, L0.768, L0.769, L0.770, L0.771, L0.772, L0.773, L0.774, L0.775, L0.776, L0.777, L0.778, L0.779, L0.780, L0.781, L0.782, L0.783, L0.784, L0.785, L0.786, L0.787, L0.788, L0.789, L0.790, L0.791, L0.792, L0.793, L0.794, L0.795, L0.796, L0.797, L0.798, L0.799, L0.800, L0.801, L0.802, L0.803, L0.804, L0.805, L0.806, L0.807, L0.808, L0.809, L0.810, L0.811, L0.812, L0.813, L0.814, L0.815, L0.816, L0.817, L0.818, L0.819, L0.820, L0.821, L0.822, L0.823, L0.824, L0.825, L0.826, L0.827, L0.828, L0.829, L0.830, L0.831, L0.832, L0.833, L0.834, L0.835, L0.836, L0.837, L0.838, L0.839, L0.840, L0.841, L0.842, L0.843, L0.844, L0.845, L0.846, L0.847, L0.848, L0.849, L0.850, L0.851, L0.852, L0.853, L0.854, L0.855, L0.856, L0.857, L0.858, L0.859, L0.860, L0.861, L0.862, L0.863, L0.864, L0.865, L0.866, L0.867, L0.868, L0.869, L0.870, L0.871, L0.872, L0.873, L0.874, L0.875, L0.876, L0.877, L0.878, L0.879, L0.880, L0.881, L0.882, L0.883, L0.884, L0.885, L0.886, L0.887, L0.888, L0.889, L0.890, L0.891, L0.892, L0.893, L0.894, L0.895, L0.896, L0.897, L0.898, L0.899, L0.900, L0.901, L0.902, L0.903, L0.904, L0.905, L0.906, L0.907, L0.908, L0.909, L0.910, L0.911, L0.912, L0.913, L0.914, L0.915, L0.916, L0.917, L0.918, L0.919, L0.920, L0.921, L0.922, L0.923, L0.924, L0.925, L0.926, L0.927, L0.928, L0.929, L0.930, L0.931, L0.932, L0.933, L0.934, L0.935, L0.936, L0.937, L0.938, L0.939, L0.940, L0.941, L0.942, L0.943, L0.944, L0.945, L0.946, L0.947, L0.948, L0.949, L0.950, L0.951, L0.952, L0.953, L0.954, L0.955, L0.956, L0.957, L0.958, L0.959, L0.960, L0.961, L0.962, L0.963, L0.964, L0.965, L0.966, L0.967, L0.968, L0.969, L0.970, L0.971, L0.972, L0.973, L0.974, L0.975, L0.976, L0.977, L0.978, L0.979, L0.980, L0.981, L0.982, L0.983, L0.984, L0.985, L0.986, L0.987, L0.988, L0.989, L0.990, L0.991, L0.992, L0.993, L0.994, L0.995, L0.996, L0.997, L0.998, L0.999, L0.1000, L0.1006, L0.1007, L0.1008, L0.1009"
- - " Creating 2033 files"
- - "**** Simulation run 412, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[40]). 200 Input Files, 198mb total:"
- - "L0 "
- - "L0.1011[24,39] 615ns 88mb|------------------L0.1011------------------| "
- - "L0.1012[40,42] 615ns 18mb |L0.1012| "
- - "L0.3038[43,54] 615ns 65mb |------------L0.3038------------|"
- - "L0.1013[24,39] 616ns 152kb|------------------L0.1013------------------| "
- - "L0.1014[40,54] 616ns 141kb |----------------L0.1014-----------------|"
- - "L0.1018[24,39] 617ns 152kb|------------------L0.1018------------------| "
- - "L0.1019[40,54] 617ns 141kb |----------------L0.1019-----------------|"
- - "L0.1023[24,39] 618ns 152kb|------------------L0.1023------------------| "
- - "L0.1024[40,54] 618ns 141kb |----------------L0.1024-----------------|"
- - "L0.1028[24,39] 619ns 152kb|------------------L0.1028------------------| "
- - "L0.1029[40,54] 619ns 141kb |----------------L0.1029-----------------|"
- - "L0.1033[24,39] 620ns 152kb|------------------L0.1033------------------| "
- - "L0.1034[40,54] 620ns 141kb |----------------L0.1034-----------------|"
- - "L0.1038[24,39] 621ns 152kb|------------------L0.1038------------------| "
- - "L0.1039[40,54] 621ns 141kb |----------------L0.1039-----------------|"
- - "L0.1043[24,39] 622ns 152kb|------------------L0.1043------------------| "
- - "L0.1044[40,54] 622ns 141kb |----------------L0.1044-----------------|"
- - "L0.1048[24,39] 623ns 152kb|------------------L0.1048------------------| "
- - "L0.1049[40,54] 623ns 141kb |----------------L0.1049-----------------|"
- - "L0.1053[24,39] 624ns 152kb|------------------L0.1053------------------| "
- - "L0.1054[40,54] 624ns 141kb |----------------L0.1054-----------------|"
- - "L0.1058[24,39] 625ns 152kb|------------------L0.1058------------------| "
- - "L0.1059[40,54] 625ns 141kb |----------------L0.1059-----------------|"
- - "L0.1063[24,39] 626ns 152kb|------------------L0.1063------------------| "
- - "L0.1064[40,54] 626ns 141kb |----------------L0.1064-----------------|"
- - "L0.1068[24,39] 627ns 152kb|------------------L0.1068------------------| "
- - "L0.1069[40,54] 627ns 141kb |----------------L0.1069-----------------|"
- - "L0.1073[24,39] 628ns 152kb|------------------L0.1073------------------| "
- - "L0.1074[40,54] 628ns 141kb |----------------L0.1074-----------------|"
- - "L0.1078[24,39] 629ns 152kb|------------------L0.1078------------------| "
- - "L0.1079[40,54] 629ns 141kb |----------------L0.1079-----------------|"
- - "L0.1083[24,39] 630ns 152kb|------------------L0.1083------------------| "
- - "L0.1084[40,54] 630ns 141kb |----------------L0.1084-----------------|"
- - "L0.1088[24,39] 631ns 152kb|------------------L0.1088------------------| "
- - "L0.1089[40,54] 631ns 141kb |----------------L0.1089-----------------|"
- - "L0.1093[24,39] 632ns 152kb|------------------L0.1093------------------| "
- - "L0.1094[40,54] 632ns 141kb |----------------L0.1094-----------------|"
- - "L0.1098[24,39] 633ns 152kb|------------------L0.1098------------------| "
- - "L0.1099[40,54] 633ns 141kb |----------------L0.1099-----------------|"
- - "L0.1103[24,39] 634ns 152kb|------------------L0.1103------------------| "
- - "L0.1104[40,54] 634ns 141kb |----------------L0.1104-----------------|"
- - "L0.1108[24,39] 635ns 152kb|------------------L0.1108------------------| "
- - "L0.1109[40,54] 635ns 141kb |----------------L0.1109-----------------|"
- - "L0.1113[24,39] 636ns 152kb|------------------L0.1113------------------| "
- - "L0.1114[40,54] 636ns 141kb |----------------L0.1114-----------------|"
- - "L0.1118[24,39] 637ns 152kb|------------------L0.1118------------------| "
- - "L0.1119[40,54] 637ns 141kb |----------------L0.1119-----------------|"
- - "L0.1123[24,39] 638ns 152kb|------------------L0.1123------------------| "
- - "L0.1124[40,54] 638ns 141kb |----------------L0.1124-----------------|"
- - "L0.1128[24,39] 639ns 152kb|------------------L0.1128------------------| "
- - "L0.1129[40,54] 639ns 141kb |----------------L0.1129-----------------|"
- - "L0.1133[24,39] 640ns 152kb|------------------L0.1133------------------| "
- - "L0.1134[40,54] 640ns 141kb |----------------L0.1134-----------------|"
- - "L0.1138[24,39] 641ns 152kb|------------------L0.1138------------------| "
- - "L0.1139[40,54] 641ns 141kb |----------------L0.1139-----------------|"
- - "L0.1143[24,39] 642ns 152kb|------------------L0.1143------------------| "
- - "L0.1144[40,54] 642ns 141kb |----------------L0.1144-----------------|"
- - "L0.1148[24,39] 643ns 152kb|------------------L0.1148------------------| "
- - "L0.1149[40,54] 643ns 141kb |----------------L0.1149-----------------|"
- - "L0.1153[24,39] 644ns 152kb|------------------L0.1153------------------| "
- - "L0.1154[40,54] 644ns 141kb |----------------L0.1154-----------------|"
- - "L0.1158[24,39] 645ns 152kb|------------------L0.1158------------------| "
- - "L0.1159[40,54] 645ns 141kb |----------------L0.1159-----------------|"
- - "L0.1163[24,39] 646ns 152kb|------------------L0.1163------------------| "
- - "L0.1164[40,54] 646ns 141kb |----------------L0.1164-----------------|"
- - "L0.1168[24,39] 647ns 152kb|------------------L0.1168------------------| "
- - "L0.1169[40,54] 647ns 141kb |----------------L0.1169-----------------|"
- - "L0.1213[24,39] 648ns 152kb|------------------L0.1213------------------| "
- - "L0.1214[40,54] 648ns 141kb |----------------L0.1214-----------------|"
- - "L0.1218[24,39] 649ns 152kb|------------------L0.1218------------------| "
- - "L0.1219[40,54] 649ns 141kb |----------------L0.1219-----------------|"
- - "L0.1173[24,39] 650ns 152kb|------------------L0.1173------------------| "
- - "L0.1174[40,54] 650ns 141kb |----------------L0.1174-----------------|"
- - "L0.1178[24,39] 651ns 152kb|------------------L0.1178------------------| "
- - "L0.1179[40,54] 651ns 141kb |----------------L0.1179-----------------|"
- - "L0.1183[24,39] 652ns 152kb|------------------L0.1183------------------| "
- - "L0.1184[40,54] 652ns 141kb |----------------L0.1184-----------------|"
- - "L0.1188[24,39] 653ns 152kb|------------------L0.1188------------------| "
- - "L0.1189[40,54] 653ns 141kb |----------------L0.1189-----------------|"
- - "L0.1193[24,39] 654ns 152kb|------------------L0.1193------------------| "
- - "L0.1194[40,54] 654ns 141kb |----------------L0.1194-----------------|"
- - "L0.1198[24,39] 655ns 152kb|------------------L0.1198------------------| "
- - "L0.1199[40,54] 655ns 141kb |----------------L0.1199-----------------|"
- - "L0.1203[24,39] 656ns 152kb|------------------L0.1203------------------| "
- - "L0.1204[40,54] 656ns 141kb |----------------L0.1204-----------------|"
- - "L0.1208[24,39] 657ns 152kb|------------------L0.1208------------------| "
- - "L0.1209[40,54] 657ns 141kb |----------------L0.1209-----------------|"
- - "L0.1223[24,39] 658ns 152kb|------------------L0.1223------------------| "
- - "L0.1224[40,54] 658ns 141kb |----------------L0.1224-----------------|"
- - "L0.1228[24,39] 659ns 152kb|------------------L0.1228------------------| "
- - "L0.1229[40,54] 659ns 141kb |----------------L0.1229-----------------|"
- - "L0.1233[24,39] 660ns 152kb|------------------L0.1233------------------| "
- - "L0.1234[40,54] 660ns 141kb |----------------L0.1234-----------------|"
- - "L0.1238[24,39] 661ns 152kb|------------------L0.1238------------------| "
- - "L0.1239[40,54] 661ns 141kb |----------------L0.1239-----------------|"
- - "L0.1243[24,39] 662ns 152kb|------------------L0.1243------------------| "
- - "L0.1244[40,54] 662ns 141kb |----------------L0.1244-----------------|"
- - "L0.1248[24,39] 663ns 152kb|------------------L0.1248------------------| "
- - "L0.1249[40,54] 663ns 141kb |----------------L0.1249-----------------|"
- - "L0.1253[24,39] 664ns 152kb|------------------L0.1253------------------| "
- - "L0.1254[40,54] 664ns 141kb |----------------L0.1254-----------------|"
- - "L0.1258[24,39] 665ns 152kb|------------------L0.1258------------------| "
- - "L0.1259[40,54] 665ns 141kb |----------------L0.1259-----------------|"
- - "L0.1263[24,39] 666ns 152kb|------------------L0.1263------------------| "
- - "L0.1264[40,54] 666ns 141kb |----------------L0.1264-----------------|"
- - "L0.1268[24,39] 667ns 152kb|------------------L0.1268------------------| "
- - "L0.1269[40,54] 667ns 141kb |----------------L0.1269-----------------|"
- - "L0.1273[24,39] 668ns 152kb|------------------L0.1273------------------| "
- - "L0.1274[40,54] 668ns 141kb |----------------L0.1274-----------------|"
- - "L0.1278[24,39] 669ns 152kb|------------------L0.1278------------------| "
- - "L0.1279[40,54] 669ns 141kb |----------------L0.1279-----------------|"
- - "L0.1283[24,39] 670ns 152kb|------------------L0.1283------------------| "
- - "L0.1284[40,54] 670ns 141kb |----------------L0.1284-----------------|"
- - "L0.1288[24,39] 671ns 152kb|------------------L0.1288------------------| "
- - "L0.1289[40,54] 671ns 141kb |----------------L0.1289-----------------|"
- - "L0.1293[24,39] 672ns 152kb|------------------L0.1293------------------| "
- - "L0.1294[40,54] 672ns 141kb |----------------L0.1294-----------------|"
- - "L0.1298[24,39] 673ns 152kb|------------------L0.1298------------------| "
- - "L0.1299[40,54] 673ns 141kb |----------------L0.1299-----------------|"
- - "L0.1303[24,39] 674ns 152kb|------------------L0.1303------------------| "
- - "L0.1304[40,54] 674ns 141kb |----------------L0.1304-----------------|"
- - "L0.1308[24,39] 675ns 152kb|------------------L0.1308------------------| "
- - "L0.1309[40,54] 675ns 141kb |----------------L0.1309-----------------|"
- - "L0.1313[24,39] 676ns 152kb|------------------L0.1313------------------| "
- - "L0.1314[40,54] 676ns 141kb |----------------L0.1314-----------------|"
- - "L0.1318[24,39] 677ns 152kb|------------------L0.1318------------------| "
- - "L0.1319[40,54] 677ns 141kb |----------------L0.1319-----------------|"
- - "L0.1323[24,39] 678ns 152kb|------------------L0.1323------------------| "
- - "L0.1324[40,54] 678ns 141kb |----------------L0.1324-----------------|"
- - "L0.1328[24,39] 679ns 152kb|------------------L0.1328------------------| "
- - "L0.1329[40,54] 679ns 141kb |----------------L0.1329-----------------|"
- - "L0.1333[24,39] 680ns 152kb|------------------L0.1333------------------| "
- - "L0.1334[40,54] 680ns 141kb |----------------L0.1334-----------------|"
- - "L0.1338[24,39] 681ns 152kb|------------------L0.1338------------------| "
- - "L0.1339[40,54] 681ns 141kb |----------------L0.1339-----------------|"
- - "L0.1343[24,39] 682ns 152kb|------------------L0.1343------------------| "
- - "L0.1344[40,54] 682ns 141kb |----------------L0.1344-----------------|"
- - "L0.1348[24,39] 683ns 152kb|------------------L0.1348------------------| "
- - "L0.1349[40,54] 683ns 141kb |----------------L0.1349-----------------|"
- - "L0.1353[24,39] 684ns 152kb|------------------L0.1353------------------| "
- - "L0.1354[40,54] 684ns 141kb |----------------L0.1354-----------------|"
- - "L0.1358[24,39] 685ns 152kb|------------------L0.1358------------------| "
- - "L0.1359[40,54] 685ns 141kb |----------------L0.1359-----------------|"
- - "L0.1363[24,39] 686ns 152kb|------------------L0.1363------------------| "
- - "L0.1364[40,54] 686ns 141kb |----------------L0.1364-----------------|"
- - "L0.1368[24,39] 687ns 152kb|------------------L0.1368------------------| "
- - "L0.1369[40,54] 687ns 141kb |----------------L0.1369-----------------|"
- - "L0.1373[24,39] 688ns 152kb|------------------L0.1373------------------| "
- - "L0.1374[40,54] 688ns 141kb |----------------L0.1374-----------------|"
- - "L0.1378[24,39] 689ns 152kb|------------------L0.1378------------------| "
- - "L0.1379[40,54] 689ns 141kb |----------------L0.1379-----------------|"
- - "L0.1383[24,39] 690ns 152kb|------------------L0.1383------------------| "
- - "L0.1384[40,54] 690ns 141kb |----------------L0.1384-----------------|"
- - "L0.1388[24,39] 691ns 152kb|------------------L0.1388------------------| "
- - "L0.1389[40,54] 691ns 141kb |----------------L0.1389-----------------|"
- - "L0.1393[24,39] 692ns 152kb|------------------L0.1393------------------| "
- - "L0.1394[40,54] 692ns 141kb |----------------L0.1394-----------------|"
- - "L0.1398[24,39] 693ns 152kb|------------------L0.1398------------------| "
- - "L0.1399[40,54] 693ns 141kb |----------------L0.1399-----------------|"
- - "L0.1403[24,39] 694ns 152kb|------------------L0.1403------------------| "
- - "L0.1404[40,54] 694ns 141kb |----------------L0.1404-----------------|"
- - "L0.1408[24,39] 695ns 152kb|------------------L0.1408------------------| "
- - "L0.1409[40,54] 695ns 141kb |----------------L0.1409-----------------|"
- - "L0.1413[24,39] 696ns 152kb|------------------L0.1413------------------| "
- - "L0.1414[40,54] 696ns 141kb |----------------L0.1414-----------------|"
- - "L0.1418[24,39] 697ns 152kb|------------------L0.1418------------------| "
- - "L0.1419[40,54] 697ns 141kb |----------------L0.1419-----------------|"
- - "L0.1423[24,39] 698ns 152kb|------------------L0.1423------------------| "
- - "L0.1424[40,54] 698ns 141kb |----------------L0.1424-----------------|"
- - "L0.1428[24,39] 699ns 152kb|------------------L0.1428------------------| "
- - "L0.1429[40,54] 699ns 141kb |----------------L0.1429-----------------|"
- - "L0.1433[24,39] 700ns 152kb|------------------L0.1433------------------| "
- - "L0.1434[40,54] 700ns 141kb |----------------L0.1434-----------------|"
- - "L0.1438[24,39] 701ns 152kb|------------------L0.1438------------------| "
- - "L0.1439[40,54] 701ns 141kb |----------------L0.1439-----------------|"
- - "L0.1443[24,39] 702ns 152kb|------------------L0.1443------------------| "
- - "L0.1444[40,54] 702ns 141kb |----------------L0.1444-----------------|"
- - "L0.1448[24,39] 703ns 152kb|------------------L0.1448------------------| "
- - "L0.1449[40,54] 703ns 141kb |----------------L0.1449-----------------|"
- - "L0.1453[24,39] 704ns 152kb|------------------L0.1453------------------| "
- - "L0.1454[40,54] 704ns 141kb |----------------L0.1454-----------------|"
- - "L0.1458[24,39] 705ns 152kb|------------------L0.1458------------------| "
- - "L0.1459[40,54] 705ns 141kb |----------------L0.1459-----------------|"
- - "L0.1463[24,39] 706ns 152kb|------------------L0.1463------------------| "
- - "L0.1464[40,54] 706ns 141kb |----------------L0.1464-----------------|"
- - "L0.1468[24,39] 707ns 152kb|------------------L0.1468------------------| "
- - "L0.1469[40,54] 707ns 141kb |----------------L0.1469-----------------|"
- - "L0.1473[24,39] 708ns 152kb|------------------L0.1473------------------| "
- - "L0.1474[40,54] 708ns 141kb |----------------L0.1474-----------------|"
- - "L0.1478[24,39] 709ns 152kb|------------------L0.1478------------------| "
- - "L0.1479[40,54] 709ns 141kb |----------------L0.1479-----------------|"
- - "L0.1483[24,39] 710ns 152kb|------------------L0.1483------------------| "
- - "L0.1484[40,54] 710ns 141kb |----------------L0.1484-----------------|"
- - "L0.1488[24,39] 711ns 152kb|------------------L0.1488------------------| "
- - "L0.1489[40,54] 711ns 141kb |----------------L0.1489-----------------|"
- - "L0.1493[24,39] 712ns 152kb|------------------L0.1493------------------| "
- - "L0.1494[40,54] 712ns 141kb |----------------L0.1494-----------------|"
- - "L0.1498[24,39] 713ns 152kb|------------------L0.1498------------------| "
- - "L0.1499[40,54] 713ns 141kb |----------------L0.1499-----------------|"
- - "L0.1503[24,39] 714ns 152kb|------------------L0.1503------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 198mb total:"
- - "L0 "
- - "L0.?[24,40] 714ns 106mb |---------------------L0.?---------------------| "
- - "L0.?[41,54] 714ns 93mb |----------------L0.?-----------------|"
- - "Committing partition 1:"
- - " Soft Deleting 200 files: L0.1011, L0.1012, L0.1013, L0.1014, L0.1018, L0.1019, L0.1023, L0.1024, L0.1028, L0.1029, L0.1033, L0.1034, L0.1038, L0.1039, L0.1043, L0.1044, L0.1048, L0.1049, L0.1053, L0.1054, L0.1058, L0.1059, L0.1063, L0.1064, L0.1068, L0.1069, L0.1073, L0.1074, L0.1078, L0.1079, L0.1083, L0.1084, L0.1088, L0.1089, L0.1093, L0.1094, L0.1098, L0.1099, L0.1103, L0.1104, L0.1108, L0.1109, L0.1113, L0.1114, L0.1118, L0.1119, L0.1123, L0.1124, L0.1128, L0.1129, L0.1133, L0.1134, L0.1138, L0.1139, L0.1143, L0.1144, L0.1148, L0.1149, L0.1153, L0.1154, L0.1158, L0.1159, L0.1163, L0.1164, L0.1168, L0.1169, L0.1173, L0.1174, L0.1178, L0.1179, L0.1183, L0.1184, L0.1188, L0.1189, L0.1193, L0.1194, L0.1198, L0.1199, L0.1203, L0.1204, L0.1208, L0.1209, L0.1213, L0.1214, L0.1218, L0.1219, L0.1223, L0.1224, L0.1228, L0.1229, L0.1233, L0.1234, L0.1238, L0.1239, L0.1243, L0.1244, L0.1248, L0.1249, L0.1253, L0.1254, L0.1258, L0.1259, L0.1263, L0.1264, L0.1268, L0.1269, L0.1273, L0.1274, L0.1278, L0.1279, L0.1283, L0.1284, L0.1288, L0.1289, L0.1293, L0.1294, L0.1298, L0.1299, L0.1303, L0.1304, L0.1308, L0.1309, L0.1313, L0.1314, L0.1318, L0.1319, L0.1323, L0.1324, L0.1328, L0.1329, L0.1333, L0.1334, L0.1338, L0.1339, L0.1343, L0.1344, L0.1348, L0.1349, L0.1353, L0.1354, L0.1358, L0.1359, L0.1363, L0.1364, L0.1368, L0.1369, L0.1373, L0.1374, L0.1378, L0.1379, L0.1383, L0.1384, L0.1388, L0.1389, L0.1393, L0.1394, L0.1398, L0.1399, L0.1403, L0.1404, L0.1408, L0.1409, L0.1413, L0.1414, L0.1418, L0.1419, L0.1423, L0.1424, L0.1428, L0.1429, L0.1433, L0.1434, L0.1438, L0.1439, L0.1443, L0.1444, L0.1448, L0.1449, L0.1453, L0.1454, L0.1458, L0.1459, L0.1463, L0.1464, L0.1468, L0.1469, L0.1473, L0.1474, L0.1478, L0.1479, L0.1483, L0.1484, L0.1488, L0.1489, L0.1493, L0.1494, L0.1498, L0.1499, L0.1503, L0.3038"
- - " Creating 2 files"
- - "**** Simulation run 413, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[48]). 200 Input Files, 29mb total:"
- - "L0 "
- - "L0.2004[40,54] 814ns 141kb |----------------L0.2004-----------------|"
- - "L0.2008[24,39] 815ns 152kb|------------------L0.2008------------------| "
- - "L0.2009[40,54] 815ns 141kb |----------------L0.2009-----------------|"
- - "L0.2013[24,39] 816ns 152kb|------------------L0.2013------------------| "
- - "L0.2014[40,54] 816ns 141kb |----------------L0.2014-----------------|"
- - "L0.2018[24,39] 817ns 152kb|------------------L0.2018------------------| "
- - "L0.2019[40,54] 817ns 141kb |----------------L0.2019-----------------|"
- - "L0.2023[24,39] 818ns 152kb|------------------L0.2023------------------| "
- - "L0.2024[40,54] 818ns 141kb |----------------L0.2024-----------------|"
- - "L0.2028[24,39] 819ns 152kb|------------------L0.2028------------------| "
- - "L0.2029[40,54] 819ns 141kb |----------------L0.2029-----------------|"
- - "L0.2033[24,39] 820ns 152kb|------------------L0.2033------------------| "
- - "L0.2034[40,54] 820ns 141kb |----------------L0.2034-----------------|"
- - "L0.2038[24,39] 821ns 152kb|------------------L0.2038------------------| "
- - "L0.2039[40,54] 821ns 141kb |----------------L0.2039-----------------|"
- - "L0.2043[24,39] 822ns 152kb|------------------L0.2043------------------| "
- - "L0.2044[40,54] 822ns 141kb |----------------L0.2044-----------------|"
- - "L0.2048[24,39] 823ns 152kb|------------------L0.2048------------------| "
- - "L0.2049[40,54] 823ns 141kb |----------------L0.2049-----------------|"
- - "L0.2053[24,39] 824ns 152kb|------------------L0.2053------------------| "
- - "L0.2054[40,54] 824ns 141kb |----------------L0.2054-----------------|"
- - "L0.2058[24,39] 825ns 152kb|------------------L0.2058------------------| "
- - "L0.2059[40,54] 825ns 141kb |----------------L0.2059-----------------|"
- - "L0.2063[24,39] 826ns 152kb|------------------L0.2063------------------| "
- - "L0.2064[40,54] 826ns 141kb |----------------L0.2064-----------------|"
- - "L0.2068[24,39] 827ns 152kb|------------------L0.2068------------------| "
- - "L0.2069[40,54] 827ns 141kb |----------------L0.2069-----------------|"
- - "L0.2073[24,39] 828ns 152kb|------------------L0.2073------------------| "
- - "L0.2074[40,54] 828ns 141kb |----------------L0.2074-----------------|"
- - "L0.2078[24,39] 829ns 152kb|------------------L0.2078------------------| "
- - "L0.2079[40,54] 829ns 141kb |----------------L0.2079-----------------|"
- - "L0.2083[24,39] 830ns 152kb|------------------L0.2083------------------| "
- - "L0.2084[40,54] 830ns 141kb |----------------L0.2084-----------------|"
- - "L0.2088[24,39] 831ns 152kb|------------------L0.2088------------------| "
- - "L0.2089[40,54] 831ns 141kb |----------------L0.2089-----------------|"
- - "L0.2093[24,39] 832ns 152kb|------------------L0.2093------------------| "
- - "L0.2094[40,54] 832ns 141kb |----------------L0.2094-----------------|"
- - "L0.2098[24,39] 833ns 152kb|------------------L0.2098------------------| "
- - "L0.2099[40,54] 833ns 141kb |----------------L0.2099-----------------|"
- - "L0.2103[24,39] 834ns 152kb|------------------L0.2103------------------| "
- - "L0.2104[40,54] 834ns 141kb |----------------L0.2104-----------------|"
- - "L0.2108[24,39] 835ns 152kb|------------------L0.2108------------------| "
- - "L0.2109[40,54] 835ns 141kb |----------------L0.2109-----------------|"
- - "L0.2113[24,39] 836ns 152kb|------------------L0.2113------------------| "
- - "L0.2114[40,54] 836ns 141kb |----------------L0.2114-----------------|"
- - "L0.2118[24,39] 837ns 152kb|------------------L0.2118------------------| "
- - "L0.2119[40,54] 837ns 141kb |----------------L0.2119-----------------|"
- - "L0.2123[24,39] 838ns 152kb|------------------L0.2123------------------| "
- - "L0.2124[40,54] 838ns 141kb |----------------L0.2124-----------------|"
- - "L0.2128[24,39] 839ns 152kb|------------------L0.2128------------------| "
- - "L0.2129[40,54] 839ns 141kb |----------------L0.2129-----------------|"
- - "L0.2133[24,39] 840ns 152kb|------------------L0.2133------------------| "
- - "L0.2134[40,54] 840ns 141kb |----------------L0.2134-----------------|"
- - "L0.2138[24,39] 841ns 152kb|------------------L0.2138------------------| "
- - "L0.2139[40,54] 841ns 141kb |----------------L0.2139-----------------|"
- - "L0.2143[24,39] 842ns 152kb|------------------L0.2143------------------| "
- - "L0.2144[40,54] 842ns 141kb |----------------L0.2144-----------------|"
- - "L0.2148[24,39] 843ns 152kb|------------------L0.2148------------------| "
- - "L0.2149[40,54] 843ns 141kb |----------------L0.2149-----------------|"
- - "L0.2153[24,39] 844ns 152kb|------------------L0.2153------------------| "
- - "L0.2154[40,54] 844ns 141kb |----------------L0.2154-----------------|"
- - "L0.2158[24,39] 845ns 152kb|------------------L0.2158------------------| "
- - "L0.2159[40,54] 845ns 141kb |----------------L0.2159-----------------|"
- - "L0.2163[24,39] 846ns 152kb|------------------L0.2163------------------| "
- - "L0.2164[40,54] 846ns 141kb |----------------L0.2164-----------------|"
- - "L0.2168[24,39] 847ns 152kb|------------------L0.2168------------------| "
- - "L0.2169[40,54] 847ns 141kb |----------------L0.2169-----------------|"
- - "L0.2173[24,39] 848ns 152kb|------------------L0.2173------------------| "
- - "L0.2174[40,54] 848ns 141kb |----------------L0.2174-----------------|"
- - "L0.2178[24,39] 849ns 152kb|------------------L0.2178------------------| "
- - "L0.2179[40,54] 849ns 141kb |----------------L0.2179-----------------|"
- - "L0.2183[24,39] 850ns 152kb|------------------L0.2183------------------| "
- - "L0.2184[40,54] 850ns 141kb |----------------L0.2184-----------------|"
- - "L0.2188[24,39] 851ns 152kb|------------------L0.2188------------------| "
- - "L0.2189[40,54] 851ns 141kb |----------------L0.2189-----------------|"
- - "L0.2193[24,39] 852ns 152kb|------------------L0.2193------------------| "
- - "L0.2194[40,54] 852ns 141kb |----------------L0.2194-----------------|"
- - "L0.2198[24,39] 853ns 152kb|------------------L0.2198------------------| "
- - "L0.2199[40,54] 853ns 141kb |----------------L0.2199-----------------|"
- - "L0.2203[24,39] 854ns 152kb|------------------L0.2203------------------| "
- - "L0.2204[40,54] 854ns 141kb |----------------L0.2204-----------------|"
- - "L0.2208[24,39] 855ns 152kb|------------------L0.2208------------------| "
- - "L0.2209[40,54] 855ns 141kb |----------------L0.2209-----------------|"
- - "L0.2213[24,39] 856ns 152kb|------------------L0.2213------------------| "
- - "L0.2214[40,54] 856ns 141kb |----------------L0.2214-----------------|"
- - "L0.2218[24,39] 857ns 152kb|------------------L0.2218------------------| "
- - "L0.2219[40,54] 857ns 141kb |----------------L0.2219-----------------|"
- - "L0.2223[24,39] 858ns 152kb|------------------L0.2223------------------| "
- - "L0.2224[40,54] 858ns 141kb |----------------L0.2224-----------------|"
- - "L0.2228[24,39] 859ns 152kb|------------------L0.2228------------------| "
- - "L0.2229[40,54] 859ns 141kb |----------------L0.2229-----------------|"
- - "L0.2233[24,39] 860ns 152kb|------------------L0.2233------------------| "
- - "L0.2234[40,54] 860ns 141kb |----------------L0.2234-----------------|"
- - "L0.2238[24,39] 861ns 152kb|------------------L0.2238------------------| "
- - "L0.2239[40,54] 861ns 141kb |----------------L0.2239-----------------|"
- - "L0.2243[24,39] 862ns 152kb|------------------L0.2243------------------| "
- - "L0.2244[40,54] 862ns 141kb |----------------L0.2244-----------------|"
- - "L0.2248[24,39] 863ns 152kb|------------------L0.2248------------------| "
- - "L0.2249[40,54] 863ns 141kb |----------------L0.2249-----------------|"
- - "L0.2253[24,39] 864ns 152kb|------------------L0.2253------------------| "
- - "L0.2254[40,54] 864ns 141kb |----------------L0.2254-----------------|"
- - "L0.2258[24,39] 865ns 152kb|------------------L0.2258------------------| "
- - "L0.2259[40,54] 865ns 141kb |----------------L0.2259-----------------|"
- - "L0.2263[24,39] 866ns 152kb|------------------L0.2263------------------| "
- - "L0.2264[40,54] 866ns 141kb |----------------L0.2264-----------------|"
- - "L0.2268[24,39] 867ns 152kb|------------------L0.2268------------------| "
- - "L0.2269[40,54] 867ns 141kb |----------------L0.2269-----------------|"
- - "L0.2273[24,39] 868ns 152kb|------------------L0.2273------------------| "
- - "L0.2274[40,54] 868ns 141kb |----------------L0.2274-----------------|"
- - "L0.2278[24,39] 869ns 152kb|------------------L0.2278------------------| "
- - "L0.2279[40,54] 869ns 141kb |----------------L0.2279-----------------|"
- - "L0.2283[24,39] 870ns 152kb|------------------L0.2283------------------| "
- - "L0.2284[40,54] 870ns 141kb |----------------L0.2284-----------------|"
- - "L0.2288[24,39] 871ns 152kb|------------------L0.2288------------------| "
- - "L0.2289[40,54] 871ns 141kb |----------------L0.2289-----------------|"
- - "L0.2293[24,39] 872ns 152kb|------------------L0.2293------------------| "
- - "L0.2294[40,54] 872ns 141kb |----------------L0.2294-----------------|"
- - "L0.2298[24,39] 873ns 152kb|------------------L0.2298------------------| "
- - "L0.2299[40,54] 873ns 141kb |----------------L0.2299-----------------|"
- - "L0.2303[24,39] 874ns 152kb|------------------L0.2303------------------| "
- - "L0.2304[40,54] 874ns 141kb |----------------L0.2304-----------------|"
- - "L0.2308[24,39] 875ns 152kb|------------------L0.2308------------------| "
- - "L0.2309[40,54] 875ns 141kb |----------------L0.2309-----------------|"
- - "L0.2313[24,39] 876ns 152kb|------------------L0.2313------------------| "
- - "L0.2314[40,54] 876ns 141kb |----------------L0.2314-----------------|"
- - "L0.2318[24,39] 877ns 152kb|------------------L0.2318------------------| "
- - "L0.2319[40,54] 877ns 141kb |----------------L0.2319-----------------|"
- - "L0.2323[24,39] 878ns 152kb|------------------L0.2323------------------| "
- - "L0.2324[40,54] 878ns 141kb |----------------L0.2324-----------------|"
- - "L0.2328[24,39] 879ns 152kb|------------------L0.2328------------------| "
- - "L0.2329[40,54] 879ns 141kb |----------------L0.2329-----------------|"
- - "L0.2333[24,39] 880ns 152kb|------------------L0.2333------------------| "
- - "L0.2334[40,54] 880ns 141kb |----------------L0.2334-----------------|"
- - "L0.2338[24,39] 881ns 152kb|------------------L0.2338------------------| "
- - "L0.2339[40,54] 881ns 141kb |----------------L0.2339-----------------|"
- - "L0.2343[24,39] 882ns 152kb|------------------L0.2343------------------| "
- - "L0.2344[40,54] 882ns 141kb |----------------L0.2344-----------------|"
- - "L0.2348[24,39] 883ns 152kb|------------------L0.2348------------------| "
- - "L0.2349[40,54] 883ns 141kb |----------------L0.2349-----------------|"
- - "L0.2353[24,39] 884ns 152kb|------------------L0.2353------------------| "
- - "L0.2354[40,54] 884ns 141kb |----------------L0.2354-----------------|"
- - "L0.2358[24,39] 885ns 152kb|------------------L0.2358------------------| "
- - "L0.2359[40,54] 885ns 141kb |----------------L0.2359-----------------|"
- - "L0.2363[24,39] 886ns 152kb|------------------L0.2363------------------| "
- - "L0.2364[40,54] 886ns 141kb |----------------L0.2364-----------------|"
- - "L0.2368[24,39] 887ns 152kb|------------------L0.2368------------------| "
- - "L0.2369[40,54] 887ns 141kb |----------------L0.2369-----------------|"
- - "L0.2373[24,39] 888ns 152kb|------------------L0.2373------------------| "
- - "L0.2374[40,54] 888ns 141kb |----------------L0.2374-----------------|"
- - "L0.2378[24,39] 889ns 152kb|------------------L0.2378------------------| "
- - "L0.2379[40,54] 889ns 141kb |----------------L0.2379-----------------|"
- - "L0.2383[24,39] 890ns 152kb|------------------L0.2383------------------| "
- - "L0.2384[40,54] 890ns 141kb |----------------L0.2384-----------------|"
- - "L0.2388[24,39] 891ns 152kb|------------------L0.2388------------------| "
- - "L0.2389[40,54] 891ns 141kb |----------------L0.2389-----------------|"
- - "L0.2393[24,39] 892ns 152kb|------------------L0.2393------------------| "
- - "L0.2394[40,54] 892ns 141kb |----------------L0.2394-----------------|"
- - "L0.2398[24,39] 893ns 152kb|------------------L0.2398------------------| "
- - "L0.2399[40,54] 893ns 141kb |----------------L0.2399-----------------|"
- - "L0.2403[24,39] 894ns 152kb|------------------L0.2403------------------| "
- - "L0.2404[40,54] 894ns 141kb |----------------L0.2404-----------------|"
- - "L0.2408[24,39] 895ns 152kb|------------------L0.2408------------------| "
- - "L0.2409[40,54] 895ns 141kb |----------------L0.2409-----------------|"
- - "L0.2413[24,39] 896ns 152kb|------------------L0.2413------------------| "
- - "L0.2414[40,54] 896ns 141kb |----------------L0.2414-----------------|"
- - "L0.2418[24,39] 897ns 152kb|------------------L0.2418------------------| "
- - "L0.2419[40,54] 897ns 141kb |----------------L0.2419-----------------|"
- - "L0.2423[24,39] 898ns 152kb|------------------L0.2423------------------| "
- - "L0.2424[40,54] 898ns 141kb |----------------L0.2424-----------------|"
- - "L0.2428[24,39] 899ns 152kb|------------------L0.2428------------------| "
- - "L0.2429[40,54] 899ns 141kb |----------------L0.2429-----------------|"
- - "L0.2433[24,39] 900ns 152kb|------------------L0.2433------------------| "
- - "L0.2434[40,54] 900ns 141kb |----------------L0.2434-----------------|"
- - "L0.2438[24,39] 901ns 152kb|------------------L0.2438------------------| "
- - "L0.2439[40,54] 901ns 141kb |----------------L0.2439-----------------|"
- - "L0.2443[24,39] 902ns 152kb|------------------L0.2443------------------| "
- - "L0.2444[40,54] 902ns 141kb |----------------L0.2444-----------------|"
- - "L0.2448[24,39] 903ns 152kb|------------------L0.2448------------------| "
- - "L0.2449[40,54] 903ns 141kb |----------------L0.2449-----------------|"
- - "L0.2493[24,39] 904ns 152kb|------------------L0.2493------------------| "
- - "L0.2494[40,54] 904ns 141kb |----------------L0.2494-----------------|"
- - "L0.2498[24,39] 905ns 152kb|------------------L0.2498------------------| "
- - "L0.2499[40,54] 905ns 141kb |----------------L0.2499-----------------|"
- - "L0.2453[24,39] 906ns 152kb|------------------L0.2453------------------| "
- - "L0.2454[40,54] 906ns 141kb |----------------L0.2454-----------------|"
- - "L0.2458[24,39] 907ns 152kb|------------------L0.2458------------------| "
- - "L0.2459[40,54] 907ns 141kb |----------------L0.2459-----------------|"
- - "L0.2463[24,39] 908ns 152kb|------------------L0.2463------------------| "
- - "L0.2464[40,54] 908ns 141kb |----------------L0.2464-----------------|"
- - "L0.2468[24,39] 909ns 152kb|------------------L0.2468------------------| "
- - "L0.2469[40,54] 909ns 141kb |----------------L0.2469-----------------|"
- - "L0.2473[24,39] 910ns 152kb|------------------L0.2473------------------| "
- - "L0.2474[40,54] 910ns 141kb |----------------L0.2474-----------------|"
- - "L0.2478[24,39] 911ns 152kb|------------------L0.2478------------------| "
- - "L0.2479[40,54] 911ns 141kb |----------------L0.2479-----------------|"
- - "L0.2483[24,39] 912ns 152kb|------------------L0.2483------------------| "
- - "L0.2484[40,54] 912ns 141kb |----------------L0.2484-----------------|"
- - "L0.2488[24,39] 913ns 152kb|------------------L0.2488------------------| "
- - "L0.2489[40,54] 913ns 141kb |----------------L0.2489-----------------|"
- - "L0.2503[24,39] 914ns 152kb|------------------L0.2503------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 29mb total:"
- - "L0 "
- - "L0.?[24,48] 914ns 23mb |---------------------------------L0.?---------------------------------| "
- - "L0.?[49,54] 914ns 6mb |----L0.?-----|"
- - "Committing partition 1:"
- - " Soft Deleting 200 files: L0.2004, L0.2008, L0.2009, L0.2013, L0.2014, L0.2018, L0.2019, L0.2023, L0.2024, L0.2028, L0.2029, L0.2033, L0.2034, L0.2038, L0.2039, L0.2043, L0.2044, L0.2048, L0.2049, L0.2053, L0.2054, L0.2058, L0.2059, L0.2063, L0.2064, L0.2068, L0.2069, L0.2073, L0.2074, L0.2078, L0.2079, L0.2083, L0.2084, L0.2088, L0.2089, L0.2093, L0.2094, L0.2098, L0.2099, L0.2103, L0.2104, L0.2108, L0.2109, L0.2113, L0.2114, L0.2118, L0.2119, L0.2123, L0.2124, L0.2128, L0.2129, L0.2133, L0.2134, L0.2138, L0.2139, L0.2143, L0.2144, L0.2148, L0.2149, L0.2153, L0.2154, L0.2158, L0.2159, L0.2163, L0.2164, L0.2168, L0.2169, L0.2173, L0.2174, L0.2178, L0.2179, L0.2183, L0.2184, L0.2188, L0.2189, L0.2193, L0.2194, L0.2198, L0.2199, L0.2203, L0.2204, L0.2208, L0.2209, L0.2213, L0.2214, L0.2218, L0.2219, L0.2223, L0.2224, L0.2228, L0.2229, L0.2233, L0.2234, L0.2238, L0.2239, L0.2243, L0.2244, L0.2248, L0.2249, L0.2253, L0.2254, L0.2258, L0.2259, L0.2263, L0.2264, L0.2268, L0.2269, L0.2273, L0.2274, L0.2278, L0.2279, L0.2283, L0.2284, L0.2288, L0.2289, L0.2293, L0.2294, L0.2298, L0.2299, L0.2303, L0.2304, L0.2308, L0.2309, L0.2313, L0.2314, L0.2318, L0.2319, L0.2323, L0.2324, L0.2328, L0.2329, L0.2333, L0.2334, L0.2338, L0.2339, L0.2343, L0.2344, L0.2348, L0.2349, L0.2353, L0.2354, L0.2358, L0.2359, L0.2363, L0.2364, L0.2368, L0.2369, L0.2373, L0.2374, L0.2378, L0.2379, L0.2383, L0.2384, L0.2388, L0.2389, L0.2393, L0.2394, L0.2398, L0.2399, L0.2403, L0.2404, L0.2408, L0.2409, L0.2413, L0.2414, L0.2418, L0.2419, L0.2423, L0.2424, L0.2428, L0.2429, L0.2433, L0.2434, L0.2438, L0.2439, L0.2443, L0.2444, L0.2448, L0.2449, L0.2453, L0.2454, L0.2458, L0.2459, L0.2463, L0.2464, L0.2468, L0.2469, L0.2473, L0.2474, L0.2478, L0.2479, L0.2483, L0.2484, L0.2488, L0.2489, L0.2493, L0.2494, L0.2498, L0.2499, L0.2503"
- - " Creating 2 files"
- - "**** Simulation run 414, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[48]). 200 Input Files, 29mb total:"
- - "L0 "
- - "L0.2504[40,54] 914ns 141kb |----------------L0.2504-----------------|"
- - "L0.2508[24,39] 915ns 152kb|------------------L0.2508------------------| "
- - "L0.2509[40,54] 915ns 141kb |----------------L0.2509-----------------|"
- - "L0.2513[24,39] 916ns 152kb|------------------L0.2513------------------| "
- - "L0.2514[40,54] 916ns 141kb |----------------L0.2514-----------------|"
- - "L0.2518[24,39] 917ns 152kb|------------------L0.2518------------------| "
- - "L0.2519[40,54] 917ns 141kb |----------------L0.2519-----------------|"
- - "L0.2523[24,39] 918ns 152kb|------------------L0.2523------------------| "
- - "L0.2524[40,54] 918ns 141kb |----------------L0.2524-----------------|"
- - "L0.2528[24,39] 919ns 152kb|------------------L0.2528------------------| "
- - "L0.2529[40,54] 919ns 141kb |----------------L0.2529-----------------|"
- - "L0.2533[24,39] 920ns 152kb|------------------L0.2533------------------| "
- - "L0.2534[40,54] 920ns 141kb |----------------L0.2534-----------------|"
- - "L0.2538[24,39] 921ns 152kb|------------------L0.2538------------------| "
- - "L0.2539[40,54] 921ns 141kb |----------------L0.2539-----------------|"
- - "L0.2543[24,39] 922ns 152kb|------------------L0.2543------------------| "
- - "L0.2544[40,54] 922ns 141kb |----------------L0.2544-----------------|"
- - "L0.2548[24,39] 923ns 152kb|------------------L0.2548------------------| "
- - "L0.2549[40,54] 923ns 141kb |----------------L0.2549-----------------|"
- - "L0.2553[24,39] 924ns 152kb|------------------L0.2553------------------| "
- - "L0.2554[40,54] 924ns 141kb |----------------L0.2554-----------------|"
- - "L0.2558[24,39] 925ns 152kb|------------------L0.2558------------------| "
- - "L0.2559[40,54] 925ns 141kb |----------------L0.2559-----------------|"
- - "L0.2563[24,39] 926ns 152kb|------------------L0.2563------------------| "
- - "L0.2564[40,54] 926ns 141kb |----------------L0.2564-----------------|"
- - "L0.2568[24,39] 927ns 152kb|------------------L0.2568------------------| "
- - "L0.2569[40,54] 927ns 141kb |----------------L0.2569-----------------|"
- - "L0.2573[24,39] 928ns 152kb|------------------L0.2573------------------| "
- - "L0.2574[40,54] 928ns 141kb |----------------L0.2574-----------------|"
- - "L0.2578[24,39] 929ns 152kb|------------------L0.2578------------------| "
- - "L0.2579[40,54] 929ns 141kb |----------------L0.2579-----------------|"
- - "L0.2583[24,39] 930ns 152kb|------------------L0.2583------------------| "
- - "L0.2584[40,54] 930ns 141kb |----------------L0.2584-----------------|"
- - "L0.2588[24,39] 931ns 152kb|------------------L0.2588------------------| "
- - "L0.2589[40,54] 931ns 141kb |----------------L0.2589-----------------|"
- - "L0.2593[24,39] 932ns 152kb|------------------L0.2593------------------| "
- - "L0.2594[40,54] 932ns 141kb |----------------L0.2594-----------------|"
- - "L0.2598[24,39] 933ns 152kb|------------------L0.2598------------------| "
- - "L0.2599[40,54] 933ns 141kb |----------------L0.2599-----------------|"
- - "L0.2603[24,39] 934ns 152kb|------------------L0.2603------------------| "
- - "L0.2604[40,54] 934ns 141kb |----------------L0.2604-----------------|"
- - "L0.2608[24,39] 935ns 152kb|------------------L0.2608------------------| "
- - "L0.2609[40,54] 935ns 141kb |----------------L0.2609-----------------|"
- - "L0.2613[24,39] 936ns 152kb|------------------L0.2613------------------| "
- - "L0.2614[40,54] 936ns 141kb |----------------L0.2614-----------------|"
- - "L0.2618[24,39] 937ns 152kb|------------------L0.2618------------------| "
- - "L0.2619[40,54] 937ns 141kb |----------------L0.2619-----------------|"
- - "L0.2623[24,39] 938ns 152kb|------------------L0.2623------------------| "
- - "L0.2624[40,54] 938ns 141kb |----------------L0.2624-----------------|"
- - "L0.2628[24,39] 939ns 152kb|------------------L0.2628------------------| "
- - "L0.2629[40,54] 939ns 141kb |----------------L0.2629-----------------|"
- - "L0.2633[24,39] 940ns 152kb|------------------L0.2633------------------| "
- - "L0.2634[40,54] 940ns 141kb |----------------L0.2634-----------------|"
- - "L0.2638[24,39] 941ns 152kb|------------------L0.2638------------------| "
- - "L0.2639[40,54] 941ns 141kb |----------------L0.2639-----------------|"
- - "L0.2643[24,39] 942ns 152kb|------------------L0.2643------------------| "
- - "L0.2644[40,54] 942ns 141kb |----------------L0.2644-----------------|"
- - "L0.2648[24,39] 943ns 152kb|------------------L0.2648------------------| "
- - "L0.2649[40,54] 943ns 141kb |----------------L0.2649-----------------|"
- - "L0.2653[24,39] 944ns 152kb|------------------L0.2653------------------| "
- - "L0.2654[40,54] 944ns 141kb |----------------L0.2654-----------------|"
- - "L0.2658[24,39] 945ns 152kb|------------------L0.2658------------------| "
- - "L0.2659[40,54] 945ns 141kb |----------------L0.2659-----------------|"
- - "L0.2663[24,39] 946ns 152kb|------------------L0.2663------------------| "
- - "L0.2664[40,54] 946ns 141kb |----------------L0.2664-----------------|"
- - "L0.2668[24,39] 947ns 152kb|------------------L0.2668------------------| "
- - "L0.2669[40,54] 947ns 141kb |----------------L0.2669-----------------|"
- - "L0.2673[24,39] 948ns 152kb|------------------L0.2673------------------| "
- - "L0.2674[40,54] 948ns 141kb |----------------L0.2674-----------------|"
- - "L0.2678[24,39] 949ns 152kb|------------------L0.2678------------------| "
- - "L0.2679[40,54] 949ns 141kb |----------------L0.2679-----------------|"
- - "L0.2683[24,39] 950ns 152kb|------------------L0.2683------------------| "
- - "L0.2684[40,54] 950ns 141kb |----------------L0.2684-----------------|"
- - "L0.2688[24,39] 951ns 152kb|------------------L0.2688------------------| "
- - "L0.2689[40,54] 951ns 141kb |----------------L0.2689-----------------|"
- - "L0.2693[24,39] 952ns 152kb|------------------L0.2693------------------| "
- - "L0.2694[40,54] 952ns 141kb |----------------L0.2694-----------------|"
- - "L0.2698[24,39] 953ns 152kb|------------------L0.2698------------------| "
- - "L0.2699[40,54] 953ns 141kb |----------------L0.2699-----------------|"
- - "L0.2703[24,39] 954ns 152kb|------------------L0.2703------------------| "
- - "L0.2704[40,54] 954ns 141kb |----------------L0.2704-----------------|"
- - "L0.2708[24,39] 955ns 152kb|------------------L0.2708------------------| "
- - "L0.2709[40,54] 955ns 141kb |----------------L0.2709-----------------|"
- - "L0.2713[24,39] 956ns 152kb|------------------L0.2713------------------| "
- - "L0.2714[40,54] 956ns 141kb |----------------L0.2714-----------------|"
- - "L0.2718[24,39] 957ns 152kb|------------------L0.2718------------------| "
- - "L0.2719[40,54] 957ns 141kb |----------------L0.2719-----------------|"
- - "L0.2723[24,39] 958ns 152kb|------------------L0.2723------------------| "
- - "L0.2724[40,54] 958ns 141kb |----------------L0.2724-----------------|"
- - "L0.2728[24,39] 959ns 152kb|------------------L0.2728------------------| "
- - "L0.2729[40,54] 959ns 141kb |----------------L0.2729-----------------|"
- - "L0.2733[24,39] 960ns 152kb|------------------L0.2733------------------| "
- - "L0.2734[40,54] 960ns 141kb |----------------L0.2734-----------------|"
- - "L0.2738[24,39] 961ns 152kb|------------------L0.2738------------------| "
- - "L0.2739[40,54] 961ns 141kb |----------------L0.2739-----------------|"
- - "L0.2743[24,39] 962ns 152kb|------------------L0.2743------------------| "
- - "L0.2744[40,54] 962ns 141kb |----------------L0.2744-----------------|"
- - "L0.2748[24,39] 963ns 152kb|------------------L0.2748------------------| "
- - "L0.2749[40,54] 963ns 141kb |----------------L0.2749-----------------|"
- - "L0.2753[24,39] 964ns 152kb|------------------L0.2753------------------| "
- - "L0.2754[40,54] 964ns 141kb |----------------L0.2754-----------------|"
- - "L0.2758[24,39] 965ns 152kb|------------------L0.2758------------------| "
- - "L0.2759[40,54] 965ns 141kb |----------------L0.2759-----------------|"
- - "L0.2763[24,39] 966ns 152kb|------------------L0.2763------------------| "
- - "L0.2764[40,54] 966ns 141kb |----------------L0.2764-----------------|"
- - "L0.2768[24,39] 967ns 152kb|------------------L0.2768------------------| "
- - "L0.2769[40,54] 967ns 141kb |----------------L0.2769-----------------|"
- - "L0.2773[24,39] 968ns 152kb|------------------L0.2773------------------| "
- - "L0.2774[40,54] 968ns 141kb |----------------L0.2774-----------------|"
- - "L0.2778[24,39] 969ns 152kb|------------------L0.2778------------------| "
- - "L0.2779[40,54] 969ns 141kb |----------------L0.2779-----------------|"
- - "L0.2783[24,39] 970ns 152kb|------------------L0.2783------------------| "
- - "L0.2784[40,54] 970ns 141kb |----------------L0.2784-----------------|"
- - "L0.2788[24,39] 971ns 152kb|------------------L0.2788------------------| "
- - "L0.2789[40,54] 971ns 141kb |----------------L0.2789-----------------|"
- - "L0.2793[24,39] 972ns 152kb|------------------L0.2793------------------| "
- - "L0.2794[40,54] 972ns 141kb |----------------L0.2794-----------------|"
- - "L0.2798[24,39] 973ns 152kb|------------------L0.2798------------------| "
- - "L0.2799[40,54] 973ns 141kb |----------------L0.2799-----------------|"
- - "L0.2803[24,39] 974ns 152kb|------------------L0.2803------------------| "
- - "L0.2804[40,54] 974ns 141kb |----------------L0.2804-----------------|"
- - "L0.2808[24,39] 975ns 152kb|------------------L0.2808------------------| "
- - "L0.2809[40,54] 975ns 141kb |----------------L0.2809-----------------|"
- - "L0.2813[24,39] 976ns 152kb|------------------L0.2813------------------| "
- - "L0.2814[40,54] 976ns 141kb |----------------L0.2814-----------------|"
- - "L0.2818[24,39] 977ns 152kb|------------------L0.2818------------------| "
- - "L0.2819[40,54] 977ns 141kb |----------------L0.2819-----------------|"
- - "L0.2823[24,39] 978ns 152kb|------------------L0.2823------------------| "
- - "L0.2824[40,54] 978ns 141kb |----------------L0.2824-----------------|"
- - "L0.2828[24,39] 979ns 152kb|------------------L0.2828------------------| "
- - "L0.2829[40,54] 979ns 141kb |----------------L0.2829-----------------|"
- - "L0.2833[24,39] 980ns 152kb|------------------L0.2833------------------| "
- - "L0.2834[40,54] 980ns 141kb |----------------L0.2834-----------------|"
- - "L0.2838[24,39] 981ns 152kb|------------------L0.2838------------------| "
- - "L0.2839[40,54] 981ns 141kb |----------------L0.2839-----------------|"
- - "L0.2843[24,39] 982ns 152kb|------------------L0.2843------------------| "
- - "L0.2844[40,54] 982ns 141kb |----------------L0.2844-----------------|"
- - "L0.2848[24,39] 983ns 152kb|------------------L0.2848------------------| "
- - "L0.2849[40,54] 983ns 141kb |----------------L0.2849-----------------|"
- - "L0.2853[24,39] 984ns 152kb|------------------L0.2853------------------| "
- - "L0.2854[40,54] 984ns 141kb |----------------L0.2854-----------------|"
- - "L0.2858[24,39] 985ns 152kb|------------------L0.2858------------------| "
- - "L0.2859[40,54] 985ns 141kb |----------------L0.2859-----------------|"
- - "L0.2863[24,39] 986ns 152kb|------------------L0.2863------------------| "
- - "L0.2864[40,54] 986ns 141kb |----------------L0.2864-----------------|"
- - "L0.2868[24,39] 987ns 152kb|------------------L0.2868------------------| "
- - "L0.2869[40,54] 987ns 141kb |----------------L0.2869-----------------|"
- - "L0.2873[24,39] 988ns 152kb|------------------L0.2873------------------| "
- - "L0.2874[40,54] 988ns 141kb |----------------L0.2874-----------------|"
- - "L0.2878[24,39] 989ns 152kb|------------------L0.2878------------------| "
- - "L0.2879[40,54] 989ns 141kb |----------------L0.2879-----------------|"
- - "L0.2883[24,39] 990ns 152kb|------------------L0.2883------------------| "
- - "L0.2884[40,54] 990ns 141kb |----------------L0.2884-----------------|"
- - "L0.2888[24,39] 991ns 152kb|------------------L0.2888------------------| "
- - "L0.2889[40,54] 991ns 141kb |----------------L0.2889-----------------|"
- - "L0.2893[24,39] 992ns 152kb|------------------L0.2893------------------| "
- - "L0.2894[40,54] 992ns 141kb |----------------L0.2894-----------------|"
- - "L0.2898[24,39] 993ns 152kb|------------------L0.2898------------------| "
- - "L0.2899[40,54] 993ns 141kb |----------------L0.2899-----------------|"
- - "L0.2903[24,39] 994ns 152kb|------------------L0.2903------------------| "
- - "L0.2904[40,54] 994ns 141kb |----------------L0.2904-----------------|"
- - "L0.2908[24,39] 995ns 152kb|------------------L0.2908------------------| "
- - "L0.2909[40,54] 995ns 141kb |----------------L0.2909-----------------|"
- - "L0.2913[24,39] 996ns 152kb|------------------L0.2913------------------| "
- - "L0.2914[40,54] 996ns 141kb |----------------L0.2914-----------------|"
- - "L0.2918[24,39] 997ns 152kb|------------------L0.2918------------------| "
- - "L0.2919[40,54] 997ns 141kb |----------------L0.2919-----------------|"
- - "L0.2923[24,39] 998ns 152kb|------------------L0.2923------------------| "
- - "L0.2924[40,54] 998ns 141kb |----------------L0.2924-----------------|"
- - "L0.2928[24,39] 999ns 152kb|------------------L0.2928------------------| "
- - "L0.2929[40,54] 999ns 141kb |----------------L0.2929-----------------|"
- - "L0.2933[24,39] 1us 152kb |------------------L0.2933------------------| "
- - "L0.2934[40,54] 1us 141kb |----------------L0.2934-----------------|"
- - "L0.2938[24,39] 1us 152kb |------------------L0.2938------------------| "
- - "L0.2939[40,54] 1us 141kb |----------------L0.2939-----------------|"
- - "L0.2943[24,39] 1us 152kb |------------------L0.2943------------------| "
- - "L0.2944[40,54] 1us 141kb |----------------L0.2944-----------------|"
- - "L0.2948[24,39] 1us 152kb |------------------L0.2948------------------| "
- - "L0.2949[40,54] 1us 141kb |----------------L0.2949-----------------|"
- - "L0.2953[24,39] 1us 152kb |------------------L0.2953------------------| "
- - "L0.2954[40,54] 1us 141kb |----------------L0.2954-----------------|"
- - "L0.2958[24,39] 1us 152kb |------------------L0.2958------------------| "
- - "L0.2959[40,54] 1us 141kb |----------------L0.2959-----------------|"
- - "L0.2963[24,39] 1.01us 152kb|------------------L0.2963------------------| "
- - "L0.2964[40,54] 1.01us 141kb |----------------L0.2964-----------------|"
- - "L0.2968[24,39] 1.01us 152kb|------------------L0.2968------------------| "
- - "L0.2969[40,54] 1.01us 141kb |----------------L0.2969-----------------|"
- - "L0.2973[24,39] 1.01us 152kb|------------------L0.2973------------------| "
- - "L0.2974[40,54] 1.01us 141kb |----------------L0.2974-----------------|"
- - "L0.2978[24,39] 1.01us 152kb|------------------L0.2978------------------| "
- - "L0.2979[40,54] 1.01us 141kb |----------------L0.2979-----------------|"
- - "L0.2983[24,39] 1.01us 152kb|------------------L0.2983------------------| "
- - "L0.2984[40,54] 1.01us 141kb |----------------L0.2984-----------------|"
- - "L0.2988[24,39] 1.01us 152kb|------------------L0.2988------------------| "
- - "L0.2989[40,54] 1.01us 141kb |----------------L0.2989-----------------|"
- - "L0.2993[24,39] 1.01us 152kb|------------------L0.2993------------------| "
- - "L0.2994[40,54] 1.01us 141kb |----------------L0.2994-----------------|"
- - "L0.2998[24,39] 1.01us 152kb|------------------L0.2998------------------| "
- - "L0.2999[40,54] 1.01us 141kb |----------------L0.2999-----------------|"
- - "L0.3003[24,39] 1.01us 152kb|------------------L0.3003------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 29mb total:"
- - "L0 "
- - "L0.?[24,48] 1.01us 23mb |---------------------------------L0.?---------------------------------| "
- - "L0.?[49,54] 1.01us 6mb |----L0.?-----|"
- - "Committing partition 1:"
- - " Soft Deleting 200 files: L0.2504, L0.2508, L0.2509, L0.2513, L0.2514, L0.2518, L0.2519, L0.2523, L0.2524, L0.2528, L0.2529, L0.2533, L0.2534, L0.2538, L0.2539, L0.2543, L0.2544, L0.2548, L0.2549, L0.2553, L0.2554, L0.2558, L0.2559, L0.2563, L0.2564, L0.2568, L0.2569, L0.2573, L0.2574, L0.2578, L0.2579, L0.2583, L0.2584, L0.2588, L0.2589, L0.2593, L0.2594, L0.2598, L0.2599, L0.2603, L0.2604, L0.2608, L0.2609, L0.2613, L0.2614, L0.2618, L0.2619, L0.2623, L0.2624, L0.2628, L0.2629, L0.2633, L0.2634, L0.2638, L0.2639, L0.2643, L0.2644, L0.2648, L0.2649, L0.2653, L0.2654, L0.2658, L0.2659, L0.2663, L0.2664, L0.2668, L0.2669, L0.2673, L0.2674, L0.2678, L0.2679, L0.2683, L0.2684, L0.2688, L0.2689, L0.2693, L0.2694, L0.2698, L0.2699, L0.2703, L0.2704, L0.2708, L0.2709, L0.2713, L0.2714, L0.2718, L0.2719, L0.2723, L0.2724, L0.2728, L0.2729, L0.2733, L0.2734, L0.2738, L0.2739, L0.2743, L0.2744, L0.2748, L0.2749, L0.2753, L0.2754, L0.2758, L0.2759, L0.2763, L0.2764, L0.2768, L0.2769, L0.2773, L0.2774, L0.2778, L0.2779, L0.2783, L0.2784, L0.2788, L0.2789, L0.2793, L0.2794, L0.2798, L0.2799, L0.2803, L0.2804, L0.2808, L0.2809, L0.2813, L0.2814, L0.2818, L0.2819, L0.2823, L0.2824, L0.2828, L0.2829, L0.2833, L0.2834, L0.2838, L0.2839, L0.2843, L0.2844, L0.2848, L0.2849, L0.2853, L0.2854, L0.2858, L0.2859, L0.2863, L0.2864, L0.2868, L0.2869, L0.2873, L0.2874, L0.2878, L0.2879, L0.2883, L0.2884, L0.2888, L0.2889, L0.2893, L0.2894, L0.2898, L0.2899, L0.2903, L0.2904, L0.2908, L0.2909, L0.2913, L0.2914, L0.2918, L0.2919, L0.2923, L0.2924, L0.2928, L0.2929, L0.2933, L0.2934, L0.2938, L0.2939, L0.2943, L0.2944, L0.2948, L0.2949, L0.2953, L0.2954, L0.2958, L0.2959, L0.2963, L0.2964, L0.2968, L0.2969, L0.2973, L0.2974, L0.2978, L0.2979, L0.2983, L0.2984, L0.2988, L0.2989, L0.2993, L0.2994, L0.2998, L0.2999, L0.3003"
- - " Creating 2 files"
- - "**** Simulation run 415, type=compact(ManySmallFiles). 13 Input Files, 2mb total:"
- - "L0 "
- - "L0.3004[40,54] 1.01us 141kb |----------------L0.3004-----------------|"
- - "L0.3008[24,39] 1.01us 152kb|------------------L0.3008------------------| "
- - "L0.3009[40,54] 1.01us 141kb |----------------L0.3009-----------------|"
- - "L0.3013[24,39] 1.02us 152kb|------------------L0.3013------------------| "
- - "L0.3014[40,54] 1.02us 141kb |----------------L0.3014-----------------|"
- - "L0.3018[24,39] 1.02us 152kb|------------------L0.3018------------------| "
- - "L0.3019[40,54] 1.02us 141kb |----------------L0.3019-----------------|"
- - "L0.3023[24,39] 1.02us 152kb|------------------L0.3023------------------| "
- - "L0.3024[40,54] 1.02us 141kb |----------------L0.3024-----------------|"
- - "L0.3028[24,39] 1.02us 152kb|------------------L0.3028------------------| "
- - "L0.3029[40,54] 1.02us 141kb |----------------L0.3029-----------------|"
- - "L0.3033[24,39] 1.02us 152kb|------------------L0.3033------------------| "
- - "L0.3034[40,54] 1.02us 141kb |----------------L0.3034-----------------|"
- - "**** 1 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0, all files 2mb "
- - "L0.?[24,54] 1.02us |------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 13 files: L0.3004, L0.3008, L0.3009, L0.3013, L0.3014, L0.3018, L0.3019, L0.3023, L0.3024, L0.3028, L0.3029, L0.3033, L0.3034"
- - " Creating 1 files"
- - "**** Simulation run 416, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[71]). 200 Input Files, 191mb total:"
- - "L0 "
- - "L0.3039[55,60] 615ns 35mb|---L0.3039---| "
- - "L0.3040[61,69] 615ns 47mb |-------L0.3040--------| "
- - "L0.3041[70,78] 615ns 53mb |-------L0.3041--------| "
- - "L0.3042[79,84] 615ns 29mb |---L0.3042---| "
- - "L0.1015[55,69] 616ns 141kb|-----------------L0.1015-----------------| "
- - "L0.1016[70,84] 616ns 141kb |-----------------L0.1016-----------------| "
- - "L0.1020[55,69] 617ns 141kb|-----------------L0.1020-----------------| "
- - "L0.1021[70,84] 617ns 141kb |-----------------L0.1021-----------------| "
- - "L0.1025[55,69] 618ns 141kb|-----------------L0.1025-----------------| "
- - "L0.1026[70,84] 618ns 141kb |-----------------L0.1026-----------------| "
- - "L0.1030[55,69] 619ns 141kb|-----------------L0.1030-----------------| "
- - "L0.1031[70,84] 619ns 141kb |-----------------L0.1031-----------------| "
- - "L0.1035[55,69] 620ns 141kb|-----------------L0.1035-----------------| "
- - "L0.1036[70,84] 620ns 141kb |-----------------L0.1036-----------------| "
- - "L0.1040[55,69] 621ns 141kb|-----------------L0.1040-----------------| "
- - "L0.1041[70,84] 621ns 141kb |-----------------L0.1041-----------------| "
- - "L0.1045[55,69] 622ns 141kb|-----------------L0.1045-----------------| "
- - "L0.1046[70,84] 622ns 141kb |-----------------L0.1046-----------------| "
- - "L0.1050[55,69] 623ns 141kb|-----------------L0.1050-----------------| "
- - "L0.1051[70,84] 623ns 141kb |-----------------L0.1051-----------------| "
- - "L0.1055[55,69] 624ns 141kb|-----------------L0.1055-----------------| "
- - "L0.1056[70,84] 624ns 141kb |-----------------L0.1056-----------------| "
- - "L0.1060[55,69] 625ns 141kb|-----------------L0.1060-----------------| "
- - "L0.1061[70,84] 625ns 141kb |-----------------L0.1061-----------------| "
- - "L0.1065[55,69] 626ns 141kb|-----------------L0.1065-----------------| "
- - "L0.1066[70,84] 626ns 141kb |-----------------L0.1066-----------------| "
- - "L0.1070[55,69] 627ns 141kb|-----------------L0.1070-----------------| "
- - "L0.1071[70,84] 627ns 141kb |-----------------L0.1071-----------------| "
- - "L0.1075[55,69] 628ns 141kb|-----------------L0.1075-----------------| "
- - "L0.1076[70,84] 628ns 141kb |-----------------L0.1076-----------------| "
- - "L0.1080[55,69] 629ns 141kb|-----------------L0.1080-----------------| "
- - "L0.1081[70,84] 629ns 141kb |-----------------L0.1081-----------------| "
- - "L0.1085[55,69] 630ns 141kb|-----------------L0.1085-----------------| "
- - "L0.1086[70,84] 630ns 141kb |-----------------L0.1086-----------------| "
- - "L0.1090[55,69] 631ns 141kb|-----------------L0.1090-----------------| "
- - "L0.1091[70,84] 631ns 141kb |-----------------L0.1091-----------------| "
- - "L0.1095[55,69] 632ns 141kb|-----------------L0.1095-----------------| "
- - "L0.1096[70,84] 632ns 141kb |-----------------L0.1096-----------------| "
- - "L0.1100[55,69] 633ns 141kb|-----------------L0.1100-----------------| "
- - "L0.1101[70,84] 633ns 141kb |-----------------L0.1101-----------------| "
- - "L0.1105[55,69] 634ns 141kb|-----------------L0.1105-----------------| "
- - "L0.1106[70,84] 634ns 141kb |-----------------L0.1106-----------------| "
- - "L0.1110[55,69] 635ns 141kb|-----------------L0.1110-----------------| "
- - "L0.1111[70,84] 635ns 141kb |-----------------L0.1111-----------------| "
- - "L0.1115[55,69] 636ns 141kb|-----------------L0.1115-----------------| "
- - "L0.1116[70,84] 636ns 141kb |-----------------L0.1116-----------------| "
- - "L0.1120[55,69] 637ns 141kb|-----------------L0.1120-----------------| "
- - "L0.1121[70,84] 637ns 141kb |-----------------L0.1121-----------------| "
- - "L0.1125[55,69] 638ns 141kb|-----------------L0.1125-----------------| "
- - "L0.1126[70,84] 638ns 141kb |-----------------L0.1126-----------------| "
- - "L0.1130[55,69] 639ns 141kb|-----------------L0.1130-----------------| "
- - "L0.1131[70,84] 639ns 141kb |-----------------L0.1131-----------------| "
- - "L0.1135[55,69] 640ns 141kb|-----------------L0.1135-----------------| "
- - "L0.1136[70,84] 640ns 141kb |-----------------L0.1136-----------------| "
- - "L0.1140[55,69] 641ns 141kb|-----------------L0.1140-----------------| "
- - "L0.1141[70,84] 641ns 141kb |-----------------L0.1141-----------------| "
- - "L0.1145[55,69] 642ns 141kb|-----------------L0.1145-----------------| "
- - "L0.1146[70,84] 642ns 141kb |-----------------L0.1146-----------------| "
- - "L0.1150[55,69] 643ns 141kb|-----------------L0.1150-----------------| "
- - "L0.1151[70,84] 643ns 141kb |-----------------L0.1151-----------------| "
- - "L0.1155[55,69] 644ns 141kb|-----------------L0.1155-----------------| "
- - "L0.1156[70,84] 644ns 141kb |-----------------L0.1156-----------------| "
- - "L0.1160[55,69] 645ns 141kb|-----------------L0.1160-----------------| "
- - "L0.1161[70,84] 645ns 141kb |-----------------L0.1161-----------------| "
- - "L0.1165[55,69] 646ns 141kb|-----------------L0.1165-----------------| "
- - "L0.1166[70,84] 646ns 141kb |-----------------L0.1166-----------------| "
- - "L0.1170[55,69] 647ns 141kb|-----------------L0.1170-----------------| "
- - "L0.1171[70,84] 647ns 141kb |-----------------L0.1171-----------------| "
- - "L0.1215[55,69] 648ns 141kb|-----------------L0.1215-----------------| "
- - "L0.1216[70,84] 648ns 141kb |-----------------L0.1216-----------------| "
- - "L0.1220[55,69] 649ns 141kb|-----------------L0.1220-----------------| "
- - "L0.1221[70,84] 649ns 141kb |-----------------L0.1221-----------------| "
- - "L0.1175[55,69] 650ns 141kb|-----------------L0.1175-----------------| "
- - "L0.1176[70,84] 650ns 141kb |-----------------L0.1176-----------------| "
- - "L0.1180[55,69] 651ns 141kb|-----------------L0.1180-----------------| "
- - "L0.1181[70,84] 651ns 141kb |-----------------L0.1181-----------------| "
- - "L0.1185[55,69] 652ns 141kb|-----------------L0.1185-----------------| "
- - "L0.1186[70,84] 652ns 141kb |-----------------L0.1186-----------------| "
- - "L0.1190[55,69] 653ns 141kb|-----------------L0.1190-----------------| "
- - "L0.1191[70,84] 653ns 141kb |-----------------L0.1191-----------------| "
- - "L0.1195[55,69] 654ns 141kb|-----------------L0.1195-----------------| "
- - "L0.1196[70,84] 654ns 141kb |-----------------L0.1196-----------------| "
- - "L0.1200[55,69] 655ns 141kb|-----------------L0.1200-----------------| "
- - "L0.1201[70,84] 655ns 141kb |-----------------L0.1201-----------------| "
- - "L0.1205[55,69] 656ns 141kb|-----------------L0.1205-----------------| "
- - "L0.1206[70,84] 656ns 141kb |-----------------L0.1206-----------------| "
- - "L0.1210[55,69] 657ns 141kb|-----------------L0.1210-----------------| "
- - "L0.1211[70,84] 657ns 141kb |-----------------L0.1211-----------------| "
- - "L0.1225[55,69] 658ns 141kb|-----------------L0.1225-----------------| "
- - "L0.1226[70,84] 658ns 141kb |-----------------L0.1226-----------------| "
- - "L0.1230[55,69] 659ns 141kb|-----------------L0.1230-----------------| "
- - "L0.1231[70,84] 659ns 141kb |-----------------L0.1231-----------------| "
- - "L0.1235[55,69] 660ns 141kb|-----------------L0.1235-----------------| "
- - "L0.1236[70,84] 660ns 141kb |-----------------L0.1236-----------------| "
- - "L0.1240[55,69] 661ns 141kb|-----------------L0.1240-----------------| "
- - "L0.1241[70,84] 661ns 141kb |-----------------L0.1241-----------------| "
- - "L0.1245[55,69] 662ns 141kb|-----------------L0.1245-----------------| "
- - "L0.1246[70,84] 662ns 141kb |-----------------L0.1246-----------------| "
- - "L0.1250[55,69] 663ns 141kb|-----------------L0.1250-----------------| "
- - "L0.1251[70,84] 663ns 141kb |-----------------L0.1251-----------------| "
- - "L0.1255[55,69] 664ns 141kb|-----------------L0.1255-----------------| "
- - "L0.1256[70,84] 664ns 141kb |-----------------L0.1256-----------------| "
- - "L0.1260[55,69] 665ns 141kb|-----------------L0.1260-----------------| "
- - "L0.1261[70,84] 665ns 141kb |-----------------L0.1261-----------------| "
- - "L0.1265[55,69] 666ns 141kb|-----------------L0.1265-----------------| "
- - "L0.1266[70,84] 666ns 141kb |-----------------L0.1266-----------------| "
- - "L0.1270[55,69] 667ns 141kb|-----------------L0.1270-----------------| "
- - "L0.1271[70,84] 667ns 141kb |-----------------L0.1271-----------------| "
- - "L0.1275[55,69] 668ns 141kb|-----------------L0.1275-----------------| "
- - "L0.1276[70,84] 668ns 141kb |-----------------L0.1276-----------------| "
- - "L0.1280[55,69] 669ns 141kb|-----------------L0.1280-----------------| "
- - "L0.1281[70,84] 669ns 141kb |-----------------L0.1281-----------------| "
- - "L0.1285[55,69] 670ns 141kb|-----------------L0.1285-----------------| "
- - "L0.1286[70,84] 670ns 141kb |-----------------L0.1286-----------------| "
- - "L0.1290[55,69] 671ns 141kb|-----------------L0.1290-----------------| "
- - "L0.1291[70,84] 671ns 141kb |-----------------L0.1291-----------------| "
- - "L0.1295[55,69] 672ns 141kb|-----------------L0.1295-----------------| "
- - "L0.1296[70,84] 672ns 141kb |-----------------L0.1296-----------------| "
- - "L0.1300[55,69] 673ns 141kb|-----------------L0.1300-----------------| "
- - "L0.1301[70,84] 673ns 141kb |-----------------L0.1301-----------------| "
- - "L0.1305[55,69] 674ns 141kb|-----------------L0.1305-----------------| "
- - "L0.1306[70,84] 674ns 141kb |-----------------L0.1306-----------------| "
- - "L0.1310[55,69] 675ns 141kb|-----------------L0.1310-----------------| "
- - "L0.1311[70,84] 675ns 141kb |-----------------L0.1311-----------------| "
- - "L0.1315[55,69] 676ns 141kb|-----------------L0.1315-----------------| "
- - "L0.1316[70,84] 676ns 141kb |-----------------L0.1316-----------------| "
- - "L0.1320[55,69] 677ns 141kb|-----------------L0.1320-----------------| "
- - "L0.1321[70,84] 677ns 141kb |-----------------L0.1321-----------------| "
- - "L0.1325[55,69] 678ns 141kb|-----------------L0.1325-----------------| "
- - "L0.1326[70,84] 678ns 141kb |-----------------L0.1326-----------------| "
- - "L0.1330[55,69] 679ns 141kb|-----------------L0.1330-----------------| "
- - "L0.1331[70,84] 679ns 141kb |-----------------L0.1331-----------------| "
- - "L0.1335[55,69] 680ns 141kb|-----------------L0.1335-----------------| "
- - "L0.1336[70,84] 680ns 141kb |-----------------L0.1336-----------------| "
- - "L0.1340[55,69] 681ns 141kb|-----------------L0.1340-----------------| "
- - "L0.1341[70,84] 681ns 141kb |-----------------L0.1341-----------------| "
- - "L0.1345[55,69] 682ns 141kb|-----------------L0.1345-----------------| "
- - "L0.1346[70,84] 682ns 141kb |-----------------L0.1346-----------------| "
- - "L0.1350[55,69] 683ns 141kb|-----------------L0.1350-----------------| "
- - "L0.1351[70,84] 683ns 141kb |-----------------L0.1351-----------------| "
- - "L0.1355[55,69] 684ns 141kb|-----------------L0.1355-----------------| "
- - "L0.1356[70,84] 684ns 141kb |-----------------L0.1356-----------------| "
- - "L0.1360[55,69] 685ns 141kb|-----------------L0.1360-----------------| "
- - "L0.1361[70,84] 685ns 141kb |-----------------L0.1361-----------------| "
- - "L0.1365[55,69] 686ns 141kb|-----------------L0.1365-----------------| "
- - "L0.1366[70,84] 686ns 141kb |-----------------L0.1366-----------------| "
- - "L0.1370[55,69] 687ns 141kb|-----------------L0.1370-----------------| "
- - "L0.1371[70,84] 687ns 141kb |-----------------L0.1371-----------------| "
- - "L0.1375[55,69] 688ns 141kb|-----------------L0.1375-----------------| "
- - "L0.1376[70,84] 688ns 141kb |-----------------L0.1376-----------------| "
- - "L0.1380[55,69] 689ns 141kb|-----------------L0.1380-----------------| "
- - "L0.1381[70,84] 689ns 141kb |-----------------L0.1381-----------------| "
- - "L0.1385[55,69] 690ns 141kb|-----------------L0.1385-----------------| "
- - "L0.1386[70,84] 690ns 141kb |-----------------L0.1386-----------------| "
- - "L0.1390[55,69] 691ns 141kb|-----------------L0.1390-----------------| "
- - "L0.1391[70,84] 691ns 141kb |-----------------L0.1391-----------------| "
- - "L0.1395[55,69] 692ns 141kb|-----------------L0.1395-----------------| "
- - "L0.1396[70,84] 692ns 141kb |-----------------L0.1396-----------------| "
- - "L0.1400[55,69] 693ns 141kb|-----------------L0.1400-----------------| "
- - "L0.1401[70,84] 693ns 141kb |-----------------L0.1401-----------------| "
- - "L0.1405[55,69] 694ns 141kb|-----------------L0.1405-----------------| "
- - "L0.1406[70,84] 694ns 141kb |-----------------L0.1406-----------------| "
- - "L0.1410[55,69] 695ns 141kb|-----------------L0.1410-----------------| "
- - "L0.1411[70,84] 695ns 141kb |-----------------L0.1411-----------------| "
- - "L0.1415[55,69] 696ns 141kb|-----------------L0.1415-----------------| "
- - "L0.1416[70,84] 696ns 141kb |-----------------L0.1416-----------------| "
- - "L0.1420[55,69] 697ns 141kb|-----------------L0.1420-----------------| "
- - "L0.1421[70,84] 697ns 141kb |-----------------L0.1421-----------------| "
- - "L0.1425[55,69] 698ns 141kb|-----------------L0.1425-----------------| "
- - "L0.1426[70,84] 698ns 141kb |-----------------L0.1426-----------------| "
- - "L0.1430[55,69] 699ns 141kb|-----------------L0.1430-----------------| "
- - "L0.1431[70,84] 699ns 141kb |-----------------L0.1431-----------------| "
- - "L0.1435[55,69] 700ns 141kb|-----------------L0.1435-----------------| "
- - "L0.1436[70,84] 700ns 141kb |-----------------L0.1436-----------------| "
- - "L0.1440[55,69] 701ns 141kb|-----------------L0.1440-----------------| "
- - "L0.1441[70,84] 701ns 141kb |-----------------L0.1441-----------------| "
- - "L0.1445[55,69] 702ns 141kb|-----------------L0.1445-----------------| "
- - "L0.1446[70,84] 702ns 141kb |-----------------L0.1446-----------------| "
- - "L0.1450[55,69] 703ns 141kb|-----------------L0.1450-----------------| "
- - "L0.1451[70,84] 703ns 141kb |-----------------L0.1451-----------------| "
- - "L0.1455[55,69] 704ns 141kb|-----------------L0.1455-----------------| "
- - "L0.1456[70,84] 704ns 141kb |-----------------L0.1456-----------------| "
- - "L0.1460[55,69] 705ns 141kb|-----------------L0.1460-----------------| "
- - "L0.1461[70,84] 705ns 141kb |-----------------L0.1461-----------------| "
- - "L0.1465[55,69] 706ns 141kb|-----------------L0.1465-----------------| "
- - "L0.1466[70,84] 706ns 141kb |-----------------L0.1466-----------------| "
- - "L0.1470[55,69] 707ns 141kb|-----------------L0.1470-----------------| "
- - "L0.1471[70,84] 707ns 141kb |-----------------L0.1471-----------------| "
- - "L0.1475[55,69] 708ns 141kb|-----------------L0.1475-----------------| "
- - "L0.1476[70,84] 708ns 141kb |-----------------L0.1476-----------------| "
- - "L0.1480[55,69] 709ns 141kb|-----------------L0.1480-----------------| "
- - "L0.1481[70,84] 709ns 141kb |-----------------L0.1481-----------------| "
- - "L0.1485[55,69] 710ns 141kb|-----------------L0.1485-----------------| "
- - "L0.1486[70,84] 710ns 141kb |-----------------L0.1486-----------------| "
- - "L0.1490[55,69] 711ns 141kb|-----------------L0.1490-----------------| "
- - "L0.1491[70,84] 711ns 141kb |-----------------L0.1491-----------------| "
- - "L0.1495[55,69] 712ns 141kb|-----------------L0.1495-----------------| "
- - "L0.1496[70,84] 712ns 141kb |-----------------L0.1496-----------------| "
- - "L0.1500[55,69] 713ns 141kb|-----------------L0.1500-----------------| "
- - "L0.1501[70,84] 713ns 141kb |-----------------L0.1501-----------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 191mb total:"
- - "L0 "
- - "L0.?[55,71] 713ns 106mb |---------------------L0.?----------------------| "
- - "L0.?[72,84] 713ns 86mb |---------------L0.?----------------| "
- - "Committing partition 1:"
- - " Soft Deleting 200 files: L0.1015, L0.1016, L0.1020, L0.1021, L0.1025, L0.1026, L0.1030, L0.1031, L0.1035, L0.1036, L0.1040, L0.1041, L0.1045, L0.1046, L0.1050, L0.1051, L0.1055, L0.1056, L0.1060, L0.1061, L0.1065, L0.1066, L0.1070, L0.1071, L0.1075, L0.1076, L0.1080, L0.1081, L0.1085, L0.1086, L0.1090, L0.1091, L0.1095, L0.1096, L0.1100, L0.1101, L0.1105, L0.1106, L0.1110, L0.1111, L0.1115, L0.1116, L0.1120, L0.1121, L0.1125, L0.1126, L0.1130, L0.1131, L0.1135, L0.1136, L0.1140, L0.1141, L0.1145, L0.1146, L0.1150, L0.1151, L0.1155, L0.1156, L0.1160, L0.1161, L0.1165, L0.1166, L0.1170, L0.1171, L0.1175, L0.1176, L0.1180, L0.1181, L0.1185, L0.1186, L0.1190, L0.1191, L0.1195, L0.1196, L0.1200, L0.1201, L0.1205, L0.1206, L0.1210, L0.1211, L0.1215, L0.1216, L0.1220, L0.1221, L0.1225, L0.1226, L0.1230, L0.1231, L0.1235, L0.1236, L0.1240, L0.1241, L0.1245, L0.1246, L0.1250, L0.1251, L0.1255, L0.1256, L0.1260, L0.1261, L0.1265, L0.1266, L0.1270, L0.1271, L0.1275, L0.1276, L0.1280, L0.1281, L0.1285, L0.1286, L0.1290, L0.1291, L0.1295, L0.1296, L0.1300, L0.1301, L0.1305, L0.1306, L0.1310, L0.1311, L0.1315, L0.1316, L0.1320, L0.1321, L0.1325, L0.1326, L0.1330, L0.1331, L0.1335, L0.1336, L0.1340, L0.1341, L0.1345, L0.1346, L0.1350, L0.1351, L0.1355, L0.1356, L0.1360, L0.1361, L0.1365, L0.1366, L0.1370, L0.1371, L0.1375, L0.1376, L0.1380, L0.1381, L0.1385, L0.1386, L0.1390, L0.1391, L0.1395, L0.1396, L0.1400, L0.1401, L0.1405, L0.1406, L0.1410, L0.1411, L0.1415, L0.1416, L0.1420, L0.1421, L0.1425, L0.1426, L0.1430, L0.1431, L0.1435, L0.1436, L0.1440, L0.1441, L0.1445, L0.1446, L0.1450, L0.1451, L0.1455, L0.1456, L0.1460, L0.1461, L0.1465, L0.1466, L0.1470, L0.1471, L0.1475, L0.1476, L0.1480, L0.1481, L0.1485, L0.1486, L0.1490, L0.1491, L0.1495, L0.1496, L0.1500, L0.1501, L0.3039, L0.3040, L0.3041, L0.3042"
- - " Creating 2 files"
- - "**** Simulation run 417, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[78]). 200 Input Files, 28mb total:"
- - "L0, all files 141kb "
- - "L0.1505[55,69] 714ns |-----------------L0.1505-----------------| "
- - "L0.1506[70,84] 714ns |-----------------L0.1506-----------------| "
- - "L0.1510[55,69] 715ns |-----------------L0.1510-----------------| "
- - "L0.1511[70,84] 715ns |-----------------L0.1511-----------------| "
- - "L0.1515[55,69] 716ns |-----------------L0.1515-----------------| "
- - "L0.1516[70,84] 716ns |-----------------L0.1516-----------------| "
- - "L0.1520[55,69] 717ns |-----------------L0.1520-----------------| "
- - "L0.1521[70,84] 717ns |-----------------L0.1521-----------------| "
- - "L0.1525[55,69] 718ns |-----------------L0.1525-----------------| "
- - "L0.1526[70,84] 718ns |-----------------L0.1526-----------------| "
- - "L0.1530[55,69] 719ns |-----------------L0.1530-----------------| "
- - "L0.1531[70,84] 719ns |-----------------L0.1531-----------------| "
- - "L0.1535[55,69] 720ns |-----------------L0.1535-----------------| "
- - "L0.1536[70,84] 720ns |-----------------L0.1536-----------------| "
- - "L0.1540[55,69] 721ns |-----------------L0.1540-----------------| "
- - "L0.1541[70,84] 721ns |-----------------L0.1541-----------------| "
- - "L0.1545[55,69] 722ns |-----------------L0.1545-----------------| "
- - "L0.1546[70,84] 722ns |-----------------L0.1546-----------------| "
- - "L0.1550[55,69] 723ns |-----------------L0.1550-----------------| "
- - "L0.1551[70,84] 723ns |-----------------L0.1551-----------------| "
- - "L0.1555[55,69] 724ns |-----------------L0.1555-----------------| "
- - "L0.1556[70,84] 724ns |-----------------L0.1556-----------------| "
- - "L0.1560[55,69] 725ns |-----------------L0.1560-----------------| "
- - "L0.1561[70,84] 725ns |-----------------L0.1561-----------------| "
- - "L0.1565[55,69] 726ns |-----------------L0.1565-----------------| "
- - "L0.1566[70,84] 726ns |-----------------L0.1566-----------------| "
- - "L0.1570[55,69] 727ns |-----------------L0.1570-----------------| "
- - "L0.1571[70,84] 727ns |-----------------L0.1571-----------------| "
- - "L0.1575[55,69] 728ns |-----------------L0.1575-----------------| "
- - "L0.1576[70,84] 728ns |-----------------L0.1576-----------------| "
- - "L0.1580[55,69] 729ns |-----------------L0.1580-----------------| "
- - "L0.1581[70,84] 729ns |-----------------L0.1581-----------------| "
- - "L0.1585[55,69] 730ns |-----------------L0.1585-----------------| "
- - "L0.1586[70,84] 730ns |-----------------L0.1586-----------------| "
- - "L0.1590[55,69] 731ns |-----------------L0.1590-----------------| "
- - "L0.1591[70,84] 731ns |-----------------L0.1591-----------------| "
- - "L0.1595[55,69] 732ns |-----------------L0.1595-----------------| "
- - "L0.1596[70,84] 732ns |-----------------L0.1596-----------------| "
- - "L0.1600[55,69] 733ns |-----------------L0.1600-----------------| "
- - "L0.1601[70,84] 733ns |-----------------L0.1601-----------------| "
- - "L0.1605[55,69] 734ns |-----------------L0.1605-----------------| "
- - "L0.1606[70,84] 734ns |-----------------L0.1606-----------------| "
- - "L0.1610[55,69] 735ns |-----------------L0.1610-----------------| "
- - "L0.1611[70,84] 735ns |-----------------L0.1611-----------------| "
- - "L0.1615[55,69] 736ns |-----------------L0.1615-----------------| "
- - "L0.1616[70,84] 736ns |-----------------L0.1616-----------------| "
- - "L0.1620[55,69] 737ns |-----------------L0.1620-----------------| "
- - "L0.1621[70,84] 737ns |-----------------L0.1621-----------------| "
- - "L0.1625[55,69] 738ns |-----------------L0.1625-----------------| "
- - "L0.1626[70,84] 738ns |-----------------L0.1626-----------------| "
- - "L0.1630[55,69] 739ns |-----------------L0.1630-----------------| "
- - "L0.1631[70,84] 739ns |-----------------L0.1631-----------------| "
- - "L0.1635[55,69] 740ns |-----------------L0.1635-----------------| "
- - "L0.1636[70,84] 740ns |-----------------L0.1636-----------------| "
- - "L0.1640[55,69] 741ns |-----------------L0.1640-----------------| "
- - "L0.1641[70,84] 741ns |-----------------L0.1641-----------------| "
- - "L0.1645[55,69] 742ns |-----------------L0.1645-----------------| "
- - "L0.1646[70,84] 742ns |-----------------L0.1646-----------------| "
- - "L0.1650[55,69] 743ns |-----------------L0.1650-----------------| "
- - "L0.1651[70,84] 743ns |-----------------L0.1651-----------------| "
- - "L0.1655[55,69] 744ns |-----------------L0.1655-----------------| "
- - "L0.1656[70,84] 744ns |-----------------L0.1656-----------------| "
- - "L0.1660[55,69] 745ns |-----------------L0.1660-----------------| "
- - "L0.1661[70,84] 745ns |-----------------L0.1661-----------------| "
- - "L0.1665[55,69] 746ns |-----------------L0.1665-----------------| "
- - "L0.1666[70,84] 746ns |-----------------L0.1666-----------------| "
- - "L0.1670[55,69] 747ns |-----------------L0.1670-----------------| "
- - "L0.1671[70,84] 747ns |-----------------L0.1671-----------------| "
- - "L0.1675[55,69] 748ns |-----------------L0.1675-----------------| "
- - "L0.1676[70,84] 748ns |-----------------L0.1676-----------------| "
- - "L0.1680[55,69] 749ns |-----------------L0.1680-----------------| "
- - "L0.1681[70,84] 749ns |-----------------L0.1681-----------------| "
- - "L0.1685[55,69] 750ns |-----------------L0.1685-----------------| "
- - "L0.1686[70,84] 750ns |-----------------L0.1686-----------------| "
- - "L0.1690[55,69] 751ns |-----------------L0.1690-----------------| "
- - "L0.1691[70,84] 751ns |-----------------L0.1691-----------------| "
- - "L0.1695[55,69] 752ns |-----------------L0.1695-----------------| "
- - "L0.1696[70,84] 752ns |-----------------L0.1696-----------------| "
- - "L0.1700[55,69] 753ns |-----------------L0.1700-----------------| "
- - "L0.1701[70,84] 753ns |-----------------L0.1701-----------------| "
- - "L0.1705[55,69] 754ns |-----------------L0.1705-----------------| "
- - "L0.1706[70,84] 754ns |-----------------L0.1706-----------------| "
- - "L0.1710[55,69] 755ns |-----------------L0.1710-----------------| "
- - "L0.1711[70,84] 755ns |-----------------L0.1711-----------------| "
- - "L0.1715[55,69] 756ns |-----------------L0.1715-----------------| "
- - "L0.1716[70,84] 756ns |-----------------L0.1716-----------------| "
- - "L0.1720[55,69] 757ns |-----------------L0.1720-----------------| "
- - "L0.1721[70,84] 757ns |-----------------L0.1721-----------------| "
- - "L0.1725[55,69] 758ns |-----------------L0.1725-----------------| "
- - "L0.1726[70,84] 758ns |-----------------L0.1726-----------------| "
- - "L0.1730[55,69] 759ns |-----------------L0.1730-----------------| "
- - "L0.1731[70,84] 759ns |-----------------L0.1731-----------------| "
- - "L0.1735[55,69] 760ns |-----------------L0.1735-----------------| "
- - "L0.1736[70,84] 760ns |-----------------L0.1736-----------------| "
- - "L0.1740[55,69] 761ns |-----------------L0.1740-----------------| "
- - "L0.1741[70,84] 761ns |-----------------L0.1741-----------------| "
- - "L0.1745[55,69] 762ns |-----------------L0.1745-----------------| "
- - "L0.1746[70,84] 762ns |-----------------L0.1746-----------------| "
- - "L0.1750[55,69] 763ns |-----------------L0.1750-----------------| "
- - "L0.1751[70,84] 763ns |-----------------L0.1751-----------------| "
- - "L0.1755[55,69] 764ns |-----------------L0.1755-----------------| "
- - "L0.1756[70,84] 764ns |-----------------L0.1756-----------------| "
- - "L0.1760[55,69] 765ns |-----------------L0.1760-----------------| "
- - "L0.1761[70,84] 765ns |-----------------L0.1761-----------------| "
- - "L0.1765[55,69] 766ns |-----------------L0.1765-----------------| "
- - "L0.1766[70,84] 766ns |-----------------L0.1766-----------------| "
- - "L0.1770[55,69] 767ns |-----------------L0.1770-----------------| "
- - "L0.1771[70,84] 767ns |-----------------L0.1771-----------------| "
- - "L0.1775[55,69] 768ns |-----------------L0.1775-----------------| "
- - "L0.1776[70,84] 768ns |-----------------L0.1776-----------------| "
- - "L0.1780[55,69] 769ns |-----------------L0.1780-----------------| "
- - "L0.1781[70,84] 769ns |-----------------L0.1781-----------------| "
- - "L0.1785[55,69] 770ns |-----------------L0.1785-----------------| "
- - "L0.1786[70,84] 770ns |-----------------L0.1786-----------------| "
- - "L0.1790[55,69] 771ns |-----------------L0.1790-----------------| "
- - "L0.1791[70,84] 771ns |-----------------L0.1791-----------------| "
- - "L0.1795[55,69] 772ns |-----------------L0.1795-----------------| "
- - "L0.1796[70,84] 772ns |-----------------L0.1796-----------------| "
- - "L0.1800[55,69] 773ns |-----------------L0.1800-----------------| "
- - "L0.1801[70,84] 773ns |-----------------L0.1801-----------------| "
- - "L0.1805[55,69] 774ns |-----------------L0.1805-----------------| "
- - "L0.1806[70,84] 774ns |-----------------L0.1806-----------------| "
- - "L0.1810[55,69] 775ns |-----------------L0.1810-----------------| "
- - "L0.1811[70,84] 775ns |-----------------L0.1811-----------------| "
- - "L0.1855[55,69] 776ns |-----------------L0.1855-----------------| "
- - "L0.1856[70,84] 776ns |-----------------L0.1856-----------------| "
- - "L0.1860[55,69] 777ns |-----------------L0.1860-----------------| "
- - "L0.1861[70,84] 777ns |-----------------L0.1861-----------------| "
- - "L0.1815[55,69] 778ns |-----------------L0.1815-----------------| "
- - "L0.1816[70,84] 778ns |-----------------L0.1816-----------------| "
- - "L0.1820[55,69] 779ns |-----------------L0.1820-----------------| "
- - "L0.1821[70,84] 779ns |-----------------L0.1821-----------------| "
- - "L0.1825[55,69] 780ns |-----------------L0.1825-----------------| "
- - "L0.1826[70,84] 780ns |-----------------L0.1826-----------------| "
- - "L0.1830[55,69] 781ns |-----------------L0.1830-----------------| "
- - "L0.1831[70,84] 781ns |-----------------L0.1831-----------------| "
- - "L0.1835[55,69] 782ns |-----------------L0.1835-----------------| "
- - "L0.1836[70,84] 782ns |-----------------L0.1836-----------------| "
- - "L0.1840[55,69] 783ns |-----------------L0.1840-----------------| "
- - "L0.1841[70,84] 783ns |-----------------L0.1841-----------------| "
- - "L0.1845[55,69] 784ns |-----------------L0.1845-----------------| "
- - "L0.1846[70,84] 784ns |-----------------L0.1846-----------------| "
- - "L0.1850[55,69] 785ns |-----------------L0.1850-----------------| "
- - "L0.1851[70,84] 785ns |-----------------L0.1851-----------------| "
- - "L0.1865[55,69] 786ns |-----------------L0.1865-----------------| "
- - "L0.1866[70,84] 786ns |-----------------L0.1866-----------------| "
- - "L0.1870[55,69] 787ns |-----------------L0.1870-----------------| "
- - "L0.1871[70,84] 787ns |-----------------L0.1871-----------------| "
- - "L0.1875[55,69] 788ns |-----------------L0.1875-----------------| "
- - "L0.1876[70,84] 788ns |-----------------L0.1876-----------------| "
- - "L0.1880[55,69] 789ns |-----------------L0.1880-----------------| "
- - "L0.1881[70,84] 789ns |-----------------L0.1881-----------------| "
- - "L0.1885[55,69] 790ns |-----------------L0.1885-----------------| "
- - "L0.1886[70,84] 790ns |-----------------L0.1886-----------------| "
- - "L0.1890[55,69] 791ns |-----------------L0.1890-----------------| "
- - "L0.1891[70,84] 791ns |-----------------L0.1891-----------------| "
- - "L0.1895[55,69] 792ns |-----------------L0.1895-----------------| "
- - "L0.1896[70,84] 792ns |-----------------L0.1896-----------------| "
- - "L0.1900[55,69] 793ns |-----------------L0.1900-----------------| "
- - "L0.1901[70,84] 793ns |-----------------L0.1901-----------------| "
- - "L0.1905[55,69] 794ns |-----------------L0.1905-----------------| "
- - "L0.1906[70,84] 794ns |-----------------L0.1906-----------------| "
- - "L0.1910[55,69] 795ns |-----------------L0.1910-----------------| "
- - "L0.1911[70,84] 795ns |-----------------L0.1911-----------------| "
- - "L0.1915[55,69] 796ns |-----------------L0.1915-----------------| "
- - "L0.1916[70,84] 796ns |-----------------L0.1916-----------------| "
- - "L0.1920[55,69] 797ns |-----------------L0.1920-----------------| "
- - "L0.1921[70,84] 797ns |-----------------L0.1921-----------------| "
- - "L0.1925[55,69] 798ns |-----------------L0.1925-----------------| "
- - "L0.1926[70,84] 798ns |-----------------L0.1926-----------------| "
- - "L0.1930[55,69] 799ns |-----------------L0.1930-----------------| "
- - "L0.1931[70,84] 799ns |-----------------L0.1931-----------------| "
- - "L0.1935[55,69] 800ns |-----------------L0.1935-----------------| "
- - "L0.1936[70,84] 800ns |-----------------L0.1936-----------------| "
- - "L0.1940[55,69] 801ns |-----------------L0.1940-----------------| "
- - "L0.1941[70,84] 801ns |-----------------L0.1941-----------------| "
- - "L0.1945[55,69] 802ns |-----------------L0.1945-----------------| "
- - "L0.1946[70,84] 802ns |-----------------L0.1946-----------------| "
- - "L0.1950[55,69] 803ns |-----------------L0.1950-----------------| "
- - "L0.1951[70,84] 803ns |-----------------L0.1951-----------------| "
- - "L0.1955[55,69] 804ns |-----------------L0.1955-----------------| "
- - "L0.1956[70,84] 804ns |-----------------L0.1956-----------------| "
- - "L0.1960[55,69] 805ns |-----------------L0.1960-----------------| "
- - "L0.1961[70,84] 805ns |-----------------L0.1961-----------------| "
- - "L0.1965[55,69] 806ns |-----------------L0.1965-----------------| "
- - "L0.1966[70,84] 806ns |-----------------L0.1966-----------------| "
- - "L0.1970[55,69] 807ns |-----------------L0.1970-----------------| "
- - "L0.1971[70,84] 807ns |-----------------L0.1971-----------------| "
- - "L0.1975[55,69] 808ns |-----------------L0.1975-----------------| "
- - "L0.1976[70,84] 808ns |-----------------L0.1976-----------------| "
- - "L0.1980[55,69] 809ns |-----------------L0.1980-----------------| "
- - "L0.1981[70,84] 809ns |-----------------L0.1981-----------------| "
- - "L0.1985[55,69] 810ns |-----------------L0.1985-----------------| "
- - "L0.1986[70,84] 810ns |-----------------L0.1986-----------------| "
- - "L0.1990[55,69] 811ns |-----------------L0.1990-----------------| "
- - "L0.1991[70,84] 811ns |-----------------L0.1991-----------------| "
- - "L0.1995[55,69] 812ns |-----------------L0.1995-----------------| "
- - "L0.1996[70,84] 812ns |-----------------L0.1996-----------------| "
- - "L0.2000[55,69] 813ns |-----------------L0.2000-----------------| "
- - "L0.2001[70,84] 813ns |-----------------L0.2001-----------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 28mb total:"
- - "L0 "
- - "L0.?[55,78] 813ns 22mb |--------------------------------L0.?---------------------------------| "
- - "L0.?[79,84] 813ns 6mb |----L0.?-----| "
- - "Committing partition 1:"
- - " Soft Deleting 200 files: L0.1505, L0.1506, L0.1510, L0.1511, L0.1515, L0.1516, L0.1520, L0.1521, L0.1525, L0.1526, L0.1530, L0.1531, L0.1535, L0.1536, L0.1540, L0.1541, L0.1545, L0.1546, L0.1550, L0.1551, L0.1555, L0.1556, L0.1560, L0.1561, L0.1565, L0.1566, L0.1570, L0.1571, L0.1575, L0.1576, L0.1580, L0.1581, L0.1585, L0.1586, L0.1590, L0.1591, L0.1595, L0.1596, L0.1600, L0.1601, L0.1605, L0.1606, L0.1610, L0.1611, L0.1615, L0.1616, L0.1620, L0.1621, L0.1625, L0.1626, L0.1630, L0.1631, L0.1635, L0.1636, L0.1640, L0.1641, L0.1645, L0.1646, L0.1650, L0.1651, L0.1655, L0.1656, L0.1660, L0.1661, L0.1665, L0.1666, L0.1670, L0.1671, L0.1675, L0.1676, L0.1680, L0.1681, L0.1685, L0.1686, L0.1690, L0.1691, L0.1695, L0.1696, L0.1700, L0.1701, L0.1705, L0.1706, L0.1710, L0.1711, L0.1715, L0.1716, L0.1720, L0.1721, L0.1725, L0.1726, L0.1730, L0.1731, L0.1735, L0.1736, L0.1740, L0.1741, L0.1745, L0.1746, L0.1750, L0.1751, L0.1755, L0.1756, L0.1760, L0.1761, L0.1765, L0.1766, L0.1770, L0.1771, L0.1775, L0.1776, L0.1780, L0.1781, L0.1785, L0.1786, L0.1790, L0.1791, L0.1795, L0.1796, L0.1800, L0.1801, L0.1805, L0.1806, L0.1810, L0.1811, L0.1815, L0.1816, L0.1820, L0.1821, L0.1825, L0.1826, L0.1830, L0.1831, L0.1835, L0.1836, L0.1840, L0.1841, L0.1845, L0.1846, L0.1850, L0.1851, L0.1855, L0.1856, L0.1860, L0.1861, L0.1865, L0.1866, L0.1870, L0.1871, L0.1875, L0.1876, L0.1880, L0.1881, L0.1885, L0.1886, L0.1890, L0.1891, L0.1895, L0.1896, L0.1900, L0.1901, L0.1905, L0.1906, L0.1910, L0.1911, L0.1915, L0.1916, L0.1920, L0.1921, L0.1925, L0.1926, L0.1930, L0.1931, L0.1935, L0.1936, L0.1940, L0.1941, L0.1945, L0.1946, L0.1950, L0.1951, L0.1955, L0.1956, L0.1960, L0.1961, L0.1965, L0.1966, L0.1970, L0.1971, L0.1975, L0.1976, L0.1980, L0.1981, L0.1985, L0.1986, L0.1990, L0.1991, L0.1995, L0.1996, L0.2000, L0.2001"
- - " Creating 2 files"
- - "**** Simulation run 418, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[78]). 200 Input Files, 28mb total:"
- - "L0, all files 141kb "
- - "L0.2005[55,69] 814ns |-----------------L0.2005-----------------| "
- - "L0.2006[70,84] 814ns |-----------------L0.2006-----------------| "
- - "L0.2010[55,69] 815ns |-----------------L0.2010-----------------| "
- - "L0.2011[70,84] 815ns |-----------------L0.2011-----------------| "
- - "L0.2015[55,69] 816ns |-----------------L0.2015-----------------| "
- - "L0.2016[70,84] 816ns |-----------------L0.2016-----------------| "
- - "L0.2020[55,69] 817ns |-----------------L0.2020-----------------| "
- - "L0.2021[70,84] 817ns |-----------------L0.2021-----------------| "
- - "L0.2025[55,69] 818ns |-----------------L0.2025-----------------| "
- - "L0.2026[70,84] 818ns |-----------------L0.2026-----------------| "
- - "L0.2030[55,69] 819ns |-----------------L0.2030-----------------| "
- - "L0.2031[70,84] 819ns |-----------------L0.2031-----------------| "
- - "L0.2035[55,69] 820ns |-----------------L0.2035-----------------| "
- - "L0.2036[70,84] 820ns |-----------------L0.2036-----------------| "
- - "L0.2040[55,69] 821ns |-----------------L0.2040-----------------| "
- - "L0.2041[70,84] 821ns |-----------------L0.2041-----------------| "
- - "L0.2045[55,69] 822ns |-----------------L0.2045-----------------| "
- - "L0.2046[70,84] 822ns |-----------------L0.2046-----------------| "
- - "L0.2050[55,69] 823ns |-----------------L0.2050-----------------| "
- - "L0.2051[70,84] 823ns |-----------------L0.2051-----------------| "
- - "L0.2055[55,69] 824ns |-----------------L0.2055-----------------| "
- - "L0.2056[70,84] 824ns |-----------------L0.2056-----------------| "
- - "L0.2060[55,69] 825ns |-----------------L0.2060-----------------| "
- - "L0.2061[70,84] 825ns |-----------------L0.2061-----------------| "
- - "L0.2065[55,69] 826ns |-----------------L0.2065-----------------| "
- - "L0.2066[70,84] 826ns |-----------------L0.2066-----------------| "
- - "L0.2070[55,69] 827ns |-----------------L0.2070-----------------| "
- - "L0.2071[70,84] 827ns |-----------------L0.2071-----------------| "
- - "L0.2075[55,69] 828ns |-----------------L0.2075-----------------| "
- - "L0.2076[70,84] 828ns |-----------------L0.2076-----------------| "
- - "L0.2080[55,69] 829ns |-----------------L0.2080-----------------| "
- - "L0.2081[70,84] 829ns |-----------------L0.2081-----------------| "
- - "L0.2085[55,69] 830ns |-----------------L0.2085-----------------| "
- - "L0.2086[70,84] 830ns |-----------------L0.2086-----------------| "
- - "L0.2090[55,69] 831ns |-----------------L0.2090-----------------| "
- - "L0.2091[70,84] 831ns |-----------------L0.2091-----------------| "
- - "L0.2095[55,69] 832ns |-----------------L0.2095-----------------| "
- - "L0.2096[70,84] 832ns |-----------------L0.2096-----------------| "
- - "L0.2100[55,69] 833ns |-----------------L0.2100-----------------| "
- - "L0.2101[70,84] 833ns |-----------------L0.2101-----------------| "
- - "L0.2105[55,69] 834ns |-----------------L0.2105-----------------| "
- - "L0.2106[70,84] 834ns |-----------------L0.2106-----------------| "
- - "L0.2110[55,69] 835ns |-----------------L0.2110-----------------| "
- - "L0.2111[70,84] 835ns |-----------------L0.2111-----------------| "
- - "L0.2115[55,69] 836ns |-----------------L0.2115-----------------| "
- - "L0.2116[70,84] 836ns |-----------------L0.2116-----------------| "
- - "L0.2120[55,69] 837ns |-----------------L0.2120-----------------| "
- - "L0.2121[70,84] 837ns |-----------------L0.2121-----------------| "
- - "L0.2125[55,69] 838ns |-----------------L0.2125-----------------| "
- - "L0.2126[70,84] 838ns |-----------------L0.2126-----------------| "
- - "L0.2130[55,69] 839ns |-----------------L0.2130-----------------| "
- - "L0.2131[70,84] 839ns |-----------------L0.2131-----------------| "
- - "L0.2135[55,69] 840ns |-----------------L0.2135-----------------| "
- - "L0.2136[70,84] 840ns |-----------------L0.2136-----------------| "
- - "L0.2140[55,69] 841ns |-----------------L0.2140-----------------| "
- - "L0.2141[70,84] 841ns |-----------------L0.2141-----------------| "
- - "L0.2145[55,69] 842ns |-----------------L0.2145-----------------| "
- - "L0.2146[70,84] 842ns |-----------------L0.2146-----------------| "
- - "L0.2150[55,69] 843ns |-----------------L0.2150-----------------| "
- - "L0.2151[70,84] 843ns |-----------------L0.2151-----------------| "
- - "L0.2155[55,69] 844ns |-----------------L0.2155-----------------| "
- - "L0.2156[70,84] 844ns |-----------------L0.2156-----------------| "
- - "L0.2160[55,69] 845ns |-----------------L0.2160-----------------| "
- - "L0.2161[70,84] 845ns |-----------------L0.2161-----------------| "
- - "L0.2165[55,69] 846ns |-----------------L0.2165-----------------| "
- - "L0.2166[70,84] 846ns |-----------------L0.2166-----------------| "
- - "L0.2170[55,69] 847ns |-----------------L0.2170-----------------| "
- - "L0.2171[70,84] 847ns |-----------------L0.2171-----------------| "
- - "L0.2175[55,69] 848ns |-----------------L0.2175-----------------| "
- - "L0.2176[70,84] 848ns |-----------------L0.2176-----------------| "
- - "L0.2180[55,69] 849ns |-----------------L0.2180-----------------| "
- - "L0.2181[70,84] 849ns |-----------------L0.2181-----------------| "
- - "L0.2185[55,69] 850ns |-----------------L0.2185-----------------| "
- - "L0.2186[70,84] 850ns |-----------------L0.2186-----------------| "
- - "L0.2190[55,69] 851ns |-----------------L0.2190-----------------| "
- - "L0.2191[70,84] 851ns |-----------------L0.2191-----------------| "
- - "L0.2195[55,69] 852ns |-----------------L0.2195-----------------| "
- - "L0.2196[70,84] 852ns |-----------------L0.2196-----------------| "
- - "L0.2200[55,69] 853ns |-----------------L0.2200-----------------| "
- - "L0.2201[70,84] 853ns |-----------------L0.2201-----------------| "
- - "L0.2205[55,69] 854ns |-----------------L0.2205-----------------| "
- - "L0.2206[70,84] 854ns |-----------------L0.2206-----------------| "
- - "L0.2210[55,69] 855ns |-----------------L0.2210-----------------| "
- - "L0.2211[70,84] 855ns |-----------------L0.2211-----------------| "
- - "L0.2215[55,69] 856ns |-----------------L0.2215-----------------| "
- - "L0.2216[70,84] 856ns |-----------------L0.2216-----------------| "
- - "L0.2220[55,69] 857ns |-----------------L0.2220-----------------| "
- - "L0.2221[70,84] 857ns |-----------------L0.2221-----------------| "
- - "L0.2225[55,69] 858ns |-----------------L0.2225-----------------| "
- - "L0.2226[70,84] 858ns |-----------------L0.2226-----------------| "
- - "L0.2230[55,69] 859ns |-----------------L0.2230-----------------| "
- - "L0.2231[70,84] 859ns |-----------------L0.2231-----------------| "
- - "L0.2235[55,69] 860ns |-----------------L0.2235-----------------| "
- - "L0.2236[70,84] 860ns |-----------------L0.2236-----------------| "
- - "L0.2240[55,69] 861ns |-----------------L0.2240-----------------| "
- - "L0.2241[70,84] 861ns |-----------------L0.2241-----------------| "
- - "L0.2245[55,69] 862ns |-----------------L0.2245-----------------| "
- - "L0.2246[70,84] 862ns |-----------------L0.2246-----------------| "
- - "L0.2250[55,69] 863ns |-----------------L0.2250-----------------| "
- - "L0.2251[70,84] 863ns |-----------------L0.2251-----------------| "
- - "L0.2255[55,69] 864ns |-----------------L0.2255-----------------| "
- - "L0.2256[70,84] 864ns |-----------------L0.2256-----------------| "
- - "L0.2260[55,69] 865ns |-----------------L0.2260-----------------| "
- - "L0.2261[70,84] 865ns |-----------------L0.2261-----------------| "
- - "L0.2265[55,69] 866ns |-----------------L0.2265-----------------| "
- - "L0.2266[70,84] 866ns |-----------------L0.2266-----------------| "
- - "L0.2270[55,69] 867ns |-----------------L0.2270-----------------| "
- - "L0.2271[70,84] 867ns |-----------------L0.2271-----------------| "
- - "L0.2275[55,69] 868ns |-----------------L0.2275-----------------| "
- - "L0.2276[70,84] 868ns |-----------------L0.2276-----------------| "
- - "L0.2280[55,69] 869ns |-----------------L0.2280-----------------| "
- - "L0.2281[70,84] 869ns |-----------------L0.2281-----------------| "
- - "L0.2285[55,69] 870ns |-----------------L0.2285-----------------| "
- - "L0.2286[70,84] 870ns |-----------------L0.2286-----------------| "
- - "L0.2290[55,69] 871ns |-----------------L0.2290-----------------| "
- - "L0.2291[70,84] 871ns |-----------------L0.2291-----------------| "
- - "L0.2295[55,69] 872ns |-----------------L0.2295-----------------| "
- - "L0.2296[70,84] 872ns |-----------------L0.2296-----------------| "
- - "L0.2300[55,69] 873ns |-----------------L0.2300-----------------| "
- - "L0.2301[70,84] 873ns |-----------------L0.2301-----------------| "
- - "L0.2305[55,69] 874ns |-----------------L0.2305-----------------| "
- - "L0.2306[70,84] 874ns |-----------------L0.2306-----------------| "
- - "L0.2310[55,69] 875ns |-----------------L0.2310-----------------| "
- - "L0.2311[70,84] 875ns |-----------------L0.2311-----------------| "
- - "L0.2315[55,69] 876ns |-----------------L0.2315-----------------| "
- - "L0.2316[70,84] 876ns |-----------------L0.2316-----------------| "
- - "L0.2320[55,69] 877ns |-----------------L0.2320-----------------| "
- - "L0.2321[70,84] 877ns |-----------------L0.2321-----------------| "
- - "L0.2325[55,69] 878ns |-----------------L0.2325-----------------| "
- - "L0.2326[70,84] 878ns |-----------------L0.2326-----------------| "
- - "L0.2330[55,69] 879ns |-----------------L0.2330-----------------| "
- - "L0.2331[70,84] 879ns |-----------------L0.2331-----------------| "
- - "L0.2335[55,69] 880ns |-----------------L0.2335-----------------| "
- - "L0.2336[70,84] 880ns |-----------------L0.2336-----------------| "
- - "L0.2340[55,69] 881ns |-----------------L0.2340-----------------| "
- - "L0.2341[70,84] 881ns |-----------------L0.2341-----------------| "
- - "L0.2345[55,69] 882ns |-----------------L0.2345-----------------| "
- - "L0.2346[70,84] 882ns |-----------------L0.2346-----------------| "
- - "L0.2350[55,69] 883ns |-----------------L0.2350-----------------| "
- - "L0.2351[70,84] 883ns |-----------------L0.2351-----------------| "
- - "L0.2355[55,69] 884ns |-----------------L0.2355-----------------| "
- - "L0.2356[70,84] 884ns |-----------------L0.2356-----------------| "
- - "L0.2360[55,69] 885ns |-----------------L0.2360-----------------| "
- - "L0.2361[70,84] 885ns |-----------------L0.2361-----------------| "
- - "L0.2365[55,69] 886ns |-----------------L0.2365-----------------| "
- - "L0.2366[70,84] 886ns |-----------------L0.2366-----------------| "
- - "L0.2370[55,69] 887ns |-----------------L0.2370-----------------| "
- - "L0.2371[70,84] 887ns |-----------------L0.2371-----------------| "
- - "L0.2375[55,69] 888ns |-----------------L0.2375-----------------| "
- - "L0.2376[70,84] 888ns |-----------------L0.2376-----------------| "
- - "L0.2380[55,69] 889ns |-----------------L0.2380-----------------| "
- - "L0.2381[70,84] 889ns |-----------------L0.2381-----------------| "
- - "L0.2385[55,69] 890ns |-----------------L0.2385-----------------| "
- - "L0.2386[70,84] 890ns |-----------------L0.2386-----------------| "
- - "L0.2390[55,69] 891ns |-----------------L0.2390-----------------| "
- - "L0.2391[70,84] 891ns |-----------------L0.2391-----------------| "
- - "L0.2395[55,69] 892ns |-----------------L0.2395-----------------| "
- - "L0.2396[70,84] 892ns |-----------------L0.2396-----------------| "
- - "L0.2400[55,69] 893ns |-----------------L0.2400-----------------| "
- - "L0.2401[70,84] 893ns |-----------------L0.2401-----------------| "
- - "L0.2405[55,69] 894ns |-----------------L0.2405-----------------| "
- - "L0.2406[70,84] 894ns |-----------------L0.2406-----------------| "
- - "L0.2410[55,69] 895ns |-----------------L0.2410-----------------| "
- - "L0.2411[70,84] 895ns |-----------------L0.2411-----------------| "
- - "L0.2415[55,69] 896ns |-----------------L0.2415-----------------| "
- - "L0.2416[70,84] 896ns |-----------------L0.2416-----------------| "
- - "L0.2420[55,69] 897ns |-----------------L0.2420-----------------| "
- - "L0.2421[70,84] 897ns |-----------------L0.2421-----------------| "
- - "L0.2425[55,69] 898ns |-----------------L0.2425-----------------| "
- - "L0.2426[70,84] 898ns |-----------------L0.2426-----------------| "
- - "L0.2430[55,69] 899ns |-----------------L0.2430-----------------| "
- - "L0.2431[70,84] 899ns |-----------------L0.2431-----------------| "
- - "L0.2435[55,69] 900ns |-----------------L0.2435-----------------| "
- - "L0.2436[70,84] 900ns |-----------------L0.2436-----------------| "
- - "L0.2440[55,69] 901ns |-----------------L0.2440-----------------| "
- - "L0.2441[70,84] 901ns |-----------------L0.2441-----------------| "
- - "L0.2445[55,69] 902ns |-----------------L0.2445-----------------| "
- - "L0.2446[70,84] 902ns |-----------------L0.2446-----------------| "
- - "L0.2450[55,69] 903ns |-----------------L0.2450-----------------| "
- - "L0.2451[70,84] 903ns |-----------------L0.2451-----------------| "
- - "L0.2495[55,69] 904ns |-----------------L0.2495-----------------| "
- - "L0.2496[70,84] 904ns |-----------------L0.2496-----------------| "
- - "L0.2500[55,69] 905ns |-----------------L0.2500-----------------| "
- - "L0.2501[70,84] 905ns |-----------------L0.2501-----------------| "
- - "L0.2455[55,69] 906ns |-----------------L0.2455-----------------| "
- - "L0.2456[70,84] 906ns |-----------------L0.2456-----------------| "
- - "L0.2460[55,69] 907ns |-----------------L0.2460-----------------| "
- - "L0.2461[70,84] 907ns |-----------------L0.2461-----------------| "
- - "L0.2465[55,69] 908ns |-----------------L0.2465-----------------| "
- - "L0.2466[70,84] 908ns |-----------------L0.2466-----------------| "
- - "L0.2470[55,69] 909ns |-----------------L0.2470-----------------| "
- - "L0.2471[70,84] 909ns |-----------------L0.2471-----------------| "
- - "L0.2475[55,69] 910ns |-----------------L0.2475-----------------| "
- - "L0.2476[70,84] 910ns |-----------------L0.2476-----------------| "
- - "L0.2480[55,69] 911ns |-----------------L0.2480-----------------| "
- - "L0.2481[70,84] 911ns |-----------------L0.2481-----------------| "
- - "L0.2485[55,69] 912ns |-----------------L0.2485-----------------| "
- - "L0.2486[70,84] 912ns |-----------------L0.2486-----------------| "
- - "L0.2490[55,69] 913ns |-----------------L0.2490-----------------| "
- - "L0.2491[70,84] 913ns |-----------------L0.2491-----------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 28mb total:"
- - "L0 "
- - "L0.?[55,78] 913ns 22mb |--------------------------------L0.?---------------------------------| "
- - "L0.?[79,84] 913ns 6mb |----L0.?-----| "
- - "Committing partition 1:"
- - " Soft Deleting 200 files: L0.2005, L0.2006, L0.2010, L0.2011, L0.2015, L0.2016, L0.2020, L0.2021, L0.2025, L0.2026, L0.2030, L0.2031, L0.2035, L0.2036, L0.2040, L0.2041, L0.2045, L0.2046, L0.2050, L0.2051, L0.2055, L0.2056, L0.2060, L0.2061, L0.2065, L0.2066, L0.2070, L0.2071, L0.2075, L0.2076, L0.2080, L0.2081, L0.2085, L0.2086, L0.2090, L0.2091, L0.2095, L0.2096, L0.2100, L0.2101, L0.2105, L0.2106, L0.2110, L0.2111, L0.2115, L0.2116, L0.2120, L0.2121, L0.2125, L0.2126, L0.2130, L0.2131, L0.2135, L0.2136, L0.2140, L0.2141, L0.2145, L0.2146, L0.2150, L0.2151, L0.2155, L0.2156, L0.2160, L0.2161, L0.2165, L0.2166, L0.2170, L0.2171, L0.2175, L0.2176, L0.2180, L0.2181, L0.2185, L0.2186, L0.2190, L0.2191, L0.2195, L0.2196, L0.2200, L0.2201, L0.2205, L0.2206, L0.2210, L0.2211, L0.2215, L0.2216, L0.2220, L0.2221, L0.2225, L0.2226, L0.2230, L0.2231, L0.2235, L0.2236, L0.2240, L0.2241, L0.2245, L0.2246, L0.2250, L0.2251, L0.2255, L0.2256, L0.2260, L0.2261, L0.2265, L0.2266, L0.2270, L0.2271, L0.2275, L0.2276, L0.2280, L0.2281, L0.2285, L0.2286, L0.2290, L0.2291, L0.2295, L0.2296, L0.2300, L0.2301, L0.2305, L0.2306, L0.2310, L0.2311, L0.2315, L0.2316, L0.2320, L0.2321, L0.2325, L0.2326, L0.2330, L0.2331, L0.2335, L0.2336, L0.2340, L0.2341, L0.2345, L0.2346, L0.2350, L0.2351, L0.2355, L0.2356, L0.2360, L0.2361, L0.2365, L0.2366, L0.2370, L0.2371, L0.2375, L0.2376, L0.2380, L0.2381, L0.2385, L0.2386, L0.2390, L0.2391, L0.2395, L0.2396, L0.2400, L0.2401, L0.2405, L0.2406, L0.2410, L0.2411, L0.2415, L0.2416, L0.2420, L0.2421, L0.2425, L0.2426, L0.2430, L0.2431, L0.2435, L0.2436, L0.2440, L0.2441, L0.2445, L0.2446, L0.2450, L0.2451, L0.2455, L0.2456, L0.2460, L0.2461, L0.2465, L0.2466, L0.2470, L0.2471, L0.2475, L0.2476, L0.2480, L0.2481, L0.2485, L0.2486, L0.2490, L0.2491, L0.2495, L0.2496, L0.2500, L0.2501"
- - " Creating 2 files"
- - "**** Simulation run 419, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[78]). 200 Input Files, 28mb total:"
- - "L0, all files 141kb "
- - "L0.2505[55,69] 914ns |-----------------L0.2505-----------------| "
- - "L0.2506[70,84] 914ns |-----------------L0.2506-----------------| "
- - "L0.2510[55,69] 915ns |-----------------L0.2510-----------------| "
- - "L0.2511[70,84] 915ns |-----------------L0.2511-----------------| "
- - "L0.2515[55,69] 916ns |-----------------L0.2515-----------------| "
- - "L0.2516[70,84] 916ns |-----------------L0.2516-----------------| "
- - "L0.2520[55,69] 917ns |-----------------L0.2520-----------------| "
- - "L0.2521[70,84] 917ns |-----------------L0.2521-----------------| "
- - "L0.2525[55,69] 918ns |-----------------L0.2525-----------------| "
- - "L0.2526[70,84] 918ns |-----------------L0.2526-----------------| "
- - "L0.2530[55,69] 919ns |-----------------L0.2530-----------------| "
- - "L0.2531[70,84] 919ns |-----------------L0.2531-----------------| "
- - "L0.2535[55,69] 920ns |-----------------L0.2535-----------------| "
- - "L0.2536[70,84] 920ns |-----------------L0.2536-----------------| "
- - "L0.2540[55,69] 921ns |-----------------L0.2540-----------------| "
- - "L0.2541[70,84] 921ns |-----------------L0.2541-----------------| "
- - "L0.2545[55,69] 922ns |-----------------L0.2545-----------------| "
- - "L0.2546[70,84] 922ns |-----------------L0.2546-----------------| "
- - "L0.2550[55,69] 923ns |-----------------L0.2550-----------------| "
- - "L0.2551[70,84] 923ns |-----------------L0.2551-----------------| "
- - "L0.2555[55,69] 924ns |-----------------L0.2555-----------------| "
- - "L0.2556[70,84] 924ns |-----------------L0.2556-----------------| "
- - "L0.2560[55,69] 925ns |-----------------L0.2560-----------------| "
- - "L0.2561[70,84] 925ns |-----------------L0.2561-----------------| "
- - "L0.2565[55,69] 926ns |-----------------L0.2565-----------------| "
- - "L0.2566[70,84] 926ns |-----------------L0.2566-----------------| "
- - "L0.2570[55,69] 927ns |-----------------L0.2570-----------------| "
- - "L0.2571[70,84] 927ns |-----------------L0.2571-----------------| "
- - "L0.2575[55,69] 928ns |-----------------L0.2575-----------------| "
- - "L0.2576[70,84] 928ns |-----------------L0.2576-----------------| "
- - "L0.2580[55,69] 929ns |-----------------L0.2580-----------------| "
- - "L0.2581[70,84] 929ns |-----------------L0.2581-----------------| "
- - "L0.2585[55,69] 930ns |-----------------L0.2585-----------------| "
- - "L0.2586[70,84] 930ns |-----------------L0.2586-----------------| "
- - "L0.2590[55,69] 931ns |-----------------L0.2590-----------------| "
- - "L0.2591[70,84] 931ns |-----------------L0.2591-----------------| "
- - "L0.2595[55,69] 932ns |-----------------L0.2595-----------------| "
- - "L0.2596[70,84] 932ns |-----------------L0.2596-----------------| "
- - "L0.2600[55,69] 933ns |-----------------L0.2600-----------------| "
- - "L0.2601[70,84] 933ns |-----------------L0.2601-----------------| "
- - "L0.2605[55,69] 934ns |-----------------L0.2605-----------------| "
- - "L0.2606[70,84] 934ns |-----------------L0.2606-----------------| "
- - "L0.2610[55,69] 935ns |-----------------L0.2610-----------------| "
- - "L0.2611[70,84] 935ns |-----------------L0.2611-----------------| "
- - "L0.2615[55,69] 936ns |-----------------L0.2615-----------------| "
- - "L0.2616[70,84] 936ns |-----------------L0.2616-----------------| "
- - "L0.2620[55,69] 937ns |-----------------L0.2620-----------------| "
- - "L0.2621[70,84] 937ns |-----------------L0.2621-----------------| "
- - "L0.2625[55,69] 938ns |-----------------L0.2625-----------------| "
- - "L0.2626[70,84] 938ns |-----------------L0.2626-----------------| "
- - "L0.2630[55,69] 939ns |-----------------L0.2630-----------------| "
- - "L0.2631[70,84] 939ns |-----------------L0.2631-----------------| "
- - "L0.2635[55,69] 940ns |-----------------L0.2635-----------------| "
- - "L0.2636[70,84] 940ns |-----------------L0.2636-----------------| "
- - "L0.2640[55,69] 941ns |-----------------L0.2640-----------------| "
- - "L0.2641[70,84] 941ns |-----------------L0.2641-----------------| "
- - "L0.2645[55,69] 942ns |-----------------L0.2645-----------------| "
- - "L0.2646[70,84] 942ns |-----------------L0.2646-----------------| "
- - "L0.2650[55,69] 943ns |-----------------L0.2650-----------------| "
- - "L0.2651[70,84] 943ns |-----------------L0.2651-----------------| "
- - "L0.2655[55,69] 944ns |-----------------L0.2655-----------------| "
- - "L0.2656[70,84] 944ns |-----------------L0.2656-----------------| "
- - "L0.2660[55,69] 945ns |-----------------L0.2660-----------------| "
- - "L0.2661[70,84] 945ns |-----------------L0.2661-----------------| "
- - "L0.2665[55,69] 946ns |-----------------L0.2665-----------------| "
- - "L0.2666[70,84] 946ns |-----------------L0.2666-----------------| "
- - "L0.2670[55,69] 947ns |-----------------L0.2670-----------------| "
- - "L0.2671[70,84] 947ns |-----------------L0.2671-----------------| "
- - "L0.2675[55,69] 948ns |-----------------L0.2675-----------------| "
- - "L0.2676[70,84] 948ns |-----------------L0.2676-----------------| "
- - "L0.2680[55,69] 949ns |-----------------L0.2680-----------------| "
- - "L0.2681[70,84] 949ns |-----------------L0.2681-----------------| "
- - "L0.2685[55,69] 950ns |-----------------L0.2685-----------------| "
- - "L0.2686[70,84] 950ns |-----------------L0.2686-----------------| "
- - "L0.2690[55,69] 951ns |-----------------L0.2690-----------------| "
- - "L0.2691[70,84] 951ns |-----------------L0.2691-----------------| "
- - "L0.2695[55,69] 952ns |-----------------L0.2695-----------------| "
- - "L0.2696[70,84] 952ns |-----------------L0.2696-----------------| "
- - "L0.2700[55,69] 953ns |-----------------L0.2700-----------------| "
- - "L0.2701[70,84] 953ns |-----------------L0.2701-----------------| "
- - "L0.2705[55,69] 954ns |-----------------L0.2705-----------------| "
- - "L0.2706[70,84] 954ns |-----------------L0.2706-----------------| "
- - "L0.2710[55,69] 955ns |-----------------L0.2710-----------------| "
- - "L0.2711[70,84] 955ns |-----------------L0.2711-----------------| "
- - "L0.2715[55,69] 956ns |-----------------L0.2715-----------------| "
- - "L0.2716[70,84] 956ns |-----------------L0.2716-----------------| "
- - "L0.2720[55,69] 957ns |-----------------L0.2720-----------------| "
- - "L0.2721[70,84] 957ns |-----------------L0.2721-----------------| "
- - "L0.2725[55,69] 958ns |-----------------L0.2725-----------------| "
- - "L0.2726[70,84] 958ns |-----------------L0.2726-----------------| "
- - "L0.2730[55,69] 959ns |-----------------L0.2730-----------------| "
- - "L0.2731[70,84] 959ns |-----------------L0.2731-----------------| "
- - "L0.2735[55,69] 960ns |-----------------L0.2735-----------------| "
- - "L0.2736[70,84] 960ns |-----------------L0.2736-----------------| "
- - "L0.2740[55,69] 961ns |-----------------L0.2740-----------------| "
- - "L0.2741[70,84] 961ns |-----------------L0.2741-----------------| "
- - "L0.2745[55,69] 962ns |-----------------L0.2745-----------------| "
- - "L0.2746[70,84] 962ns |-----------------L0.2746-----------------| "
- - "L0.2750[55,69] 963ns |-----------------L0.2750-----------------| "
- - "L0.2751[70,84] 963ns |-----------------L0.2751-----------------| "
- - "L0.2755[55,69] 964ns |-----------------L0.2755-----------------| "
- - "L0.2756[70,84] 964ns |-----------------L0.2756-----------------| "
- - "L0.2760[55,69] 965ns |-----------------L0.2760-----------------| "
- - "L0.2761[70,84] 965ns |-----------------L0.2761-----------------| "
- - "L0.2765[55,69] 966ns |-----------------L0.2765-----------------| "
- - "L0.2766[70,84] 966ns |-----------------L0.2766-----------------| "
- - "L0.2770[55,69] 967ns |-----------------L0.2770-----------------| "
- - "L0.2771[70,84] 967ns |-----------------L0.2771-----------------| "
- - "L0.2775[55,69] 968ns |-----------------L0.2775-----------------| "
- - "L0.2776[70,84] 968ns |-----------------L0.2776-----------------| "
- - "L0.2780[55,69] 969ns |-----------------L0.2780-----------------| "
- - "L0.2781[70,84] 969ns |-----------------L0.2781-----------------| "
- - "L0.2785[55,69] 970ns |-----------------L0.2785-----------------| "
- - "L0.2786[70,84] 970ns |-----------------L0.2786-----------------| "
- - "L0.2790[55,69] 971ns |-----------------L0.2790-----------------| "
- - "L0.2791[70,84] 971ns |-----------------L0.2791-----------------| "
- - "L0.2795[55,69] 972ns |-----------------L0.2795-----------------| "
- - "L0.2796[70,84] 972ns |-----------------L0.2796-----------------| "
- - "L0.2800[55,69] 973ns |-----------------L0.2800-----------------| "
- - "L0.2801[70,84] 973ns |-----------------L0.2801-----------------| "
- - "L0.2805[55,69] 974ns |-----------------L0.2805-----------------| "
- - "L0.2806[70,84] 974ns |-----------------L0.2806-----------------| "
- - "L0.2810[55,69] 975ns |-----------------L0.2810-----------------| "
- - "L0.2811[70,84] 975ns |-----------------L0.2811-----------------| "
- - "L0.2815[55,69] 976ns |-----------------L0.2815-----------------| "
- - "L0.2816[70,84] 976ns |-----------------L0.2816-----------------| "
- - "L0.2820[55,69] 977ns |-----------------L0.2820-----------------| "
- - "L0.2821[70,84] 977ns |-----------------L0.2821-----------------| "
- - "L0.2825[55,69] 978ns |-----------------L0.2825-----------------| "
- - "L0.2826[70,84] 978ns |-----------------L0.2826-----------------| "
- - "L0.2830[55,69] 979ns |-----------------L0.2830-----------------| "
- - "L0.2831[70,84] 979ns |-----------------L0.2831-----------------| "
- - "L0.2835[55,69] 980ns |-----------------L0.2835-----------------| "
- - "L0.2836[70,84] 980ns |-----------------L0.2836-----------------| "
- - "L0.2840[55,69] 981ns |-----------------L0.2840-----------------| "
- - "L0.2841[70,84] 981ns |-----------------L0.2841-----------------| "
- - "L0.2845[55,69] 982ns |-----------------L0.2845-----------------| "
- - "L0.2846[70,84] 982ns |-----------------L0.2846-----------------| "
- - "L0.2850[55,69] 983ns |-----------------L0.2850-----------------| "
- - "L0.2851[70,84] 983ns |-----------------L0.2851-----------------| "
- - "L0.2855[55,69] 984ns |-----------------L0.2855-----------------| "
- - "L0.2856[70,84] 984ns |-----------------L0.2856-----------------| "
- - "L0.2860[55,69] 985ns |-----------------L0.2860-----------------| "
- - "L0.2861[70,84] 985ns |-----------------L0.2861-----------------| "
- - "L0.2865[55,69] 986ns |-----------------L0.2865-----------------| "
- - "L0.2866[70,84] 986ns |-----------------L0.2866-----------------| "
- - "L0.2870[55,69] 987ns |-----------------L0.2870-----------------| "
- - "L0.2871[70,84] 987ns |-----------------L0.2871-----------------| "
- - "L0.2875[55,69] 988ns |-----------------L0.2875-----------------| "
- - "L0.2876[70,84] 988ns |-----------------L0.2876-----------------| "
- - "L0.2880[55,69] 989ns |-----------------L0.2880-----------------| "
- - "L0.2881[70,84] 989ns |-----------------L0.2881-----------------| "
- - "L0.2885[55,69] 990ns |-----------------L0.2885-----------------| "
- - "L0.2886[70,84] 990ns |-----------------L0.2886-----------------| "
- - "L0.2890[55,69] 991ns |-----------------L0.2890-----------------| "
- - "L0.2891[70,84] 991ns |-----------------L0.2891-----------------| "
- - "L0.2895[55,69] 992ns |-----------------L0.2895-----------------| "
- - "L0.2896[70,84] 992ns |-----------------L0.2896-----------------| "
- - "L0.2900[55,69] 993ns |-----------------L0.2900-----------------| "
- - "L0.2901[70,84] 993ns |-----------------L0.2901-----------------| "
- - "L0.2905[55,69] 994ns |-----------------L0.2905-----------------| "
- - "L0.2906[70,84] 994ns |-----------------L0.2906-----------------| "
- - "L0.2910[55,69] 995ns |-----------------L0.2910-----------------| "
- - "L0.2911[70,84] 995ns |-----------------L0.2911-----------------| "
- - "L0.2915[55,69] 996ns |-----------------L0.2915-----------------| "
- - "L0.2916[70,84] 996ns |-----------------L0.2916-----------------| "
- - "L0.2920[55,69] 997ns |-----------------L0.2920-----------------| "
- - "L0.2921[70,84] 997ns |-----------------L0.2921-----------------| "
- - "L0.2925[55,69] 998ns |-----------------L0.2925-----------------| "
- - "L0.2926[70,84] 998ns |-----------------L0.2926-----------------| "
- - "L0.2930[55,69] 999ns |-----------------L0.2930-----------------| "
- - "L0.2931[70,84] 999ns |-----------------L0.2931-----------------| "
- - "L0.2935[55,69] 1us |-----------------L0.2935-----------------| "
- - "L0.2936[70,84] 1us |-----------------L0.2936-----------------| "
- - "L0.2940[55,69] 1us |-----------------L0.2940-----------------| "
- - "L0.2941[70,84] 1us |-----------------L0.2941-----------------| "
- - "L0.2945[55,69] 1us |-----------------L0.2945-----------------| "
- - "L0.2946[70,84] 1us |-----------------L0.2946-----------------| "
- - "L0.2950[55,69] 1us |-----------------L0.2950-----------------| "
- - "L0.2951[70,84] 1us |-----------------L0.2951-----------------| "
- - "L0.2955[55,69] 1us |-----------------L0.2955-----------------| "
- - "L0.2956[70,84] 1us |-----------------L0.2956-----------------| "
- - "L0.2960[55,69] 1us |-----------------L0.2960-----------------| "
- - "L0.2961[70,84] 1us |-----------------L0.2961-----------------| "
- - "L0.2965[55,69] 1.01us |-----------------L0.2965-----------------| "
- - "L0.2966[70,84] 1.01us |-----------------L0.2966-----------------| "
- - "L0.2970[55,69] 1.01us |-----------------L0.2970-----------------| "
- - "L0.2971[70,84] 1.01us |-----------------L0.2971-----------------| "
- - "L0.2975[55,69] 1.01us |-----------------L0.2975-----------------| "
- - "L0.2976[70,84] 1.01us |-----------------L0.2976-----------------| "
- - "L0.2980[55,69] 1.01us |-----------------L0.2980-----------------| "
- - "L0.2981[70,84] 1.01us |-----------------L0.2981-----------------| "
- - "L0.2985[55,69] 1.01us |-----------------L0.2985-----------------| "
- - "L0.2986[70,84] 1.01us |-----------------L0.2986-----------------| "
- - "L0.2990[55,69] 1.01us |-----------------L0.2990-----------------| "
- - "L0.2991[70,84] 1.01us |-----------------L0.2991-----------------| "
- - "L0.2995[55,69] 1.01us |-----------------L0.2995-----------------| "
- - "L0.2996[70,84] 1.01us |-----------------L0.2996-----------------| "
- - "L0.3000[55,69] 1.01us |-----------------L0.3000-----------------| "
- - "L0.3001[70,84] 1.01us |-----------------L0.3001-----------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 28mb total:"
- - "L0 "
- - "L0.?[55,78] 1.01us 22mb |--------------------------------L0.?---------------------------------| "
- - "L0.?[79,84] 1.01us 6mb |----L0.?-----| "
- - "Committing partition 1:"
- - " Soft Deleting 200 files: L0.2505, L0.2506, L0.2510, L0.2511, L0.2515, L0.2516, L0.2520, L0.2521, L0.2525, L0.2526, L0.2530, L0.2531, L0.2535, L0.2536, L0.2540, L0.2541, L0.2545, L0.2546, L0.2550, L0.2551, L0.2555, L0.2556, L0.2560, L0.2561, L0.2565, L0.2566, L0.2570, L0.2571, L0.2575, L0.2576, L0.2580, L0.2581, L0.2585, L0.2586, L0.2590, L0.2591, L0.2595, L0.2596, L0.2600, L0.2601, L0.2605, L0.2606, L0.2610, L0.2611, L0.2615, L0.2616, L0.2620, L0.2621, L0.2625, L0.2626, L0.2630, L0.2631, L0.2635, L0.2636, L0.2640, L0.2641, L0.2645, L0.2646, L0.2650, L0.2651, L0.2655, L0.2656, L0.2660, L0.2661, L0.2665, L0.2666, L0.2670, L0.2671, L0.2675, L0.2676, L0.2680, L0.2681, L0.2685, L0.2686, L0.2690, L0.2691, L0.2695, L0.2696, L0.2700, L0.2701, L0.2705, L0.2706, L0.2710, L0.2711, L0.2715, L0.2716, L0.2720, L0.2721, L0.2725, L0.2726, L0.2730, L0.2731, L0.2735, L0.2736, L0.2740, L0.2741, L0.2745, L0.2746, L0.2750, L0.2751, L0.2755, L0.2756, L0.2760, L0.2761, L0.2765, L0.2766, L0.2770, L0.2771, L0.2775, L0.2776, L0.2780, L0.2781, L0.2785, L0.2786, L0.2790, L0.2791, L0.2795, L0.2796, L0.2800, L0.2801, L0.2805, L0.2806, L0.2810, L0.2811, L0.2815, L0.2816, L0.2820, L0.2821, L0.2825, L0.2826, L0.2830, L0.2831, L0.2835, L0.2836, L0.2840, L0.2841, L0.2845, L0.2846, L0.2850, L0.2851, L0.2855, L0.2856, L0.2860, L0.2861, L0.2865, L0.2866, L0.2870, L0.2871, L0.2875, L0.2876, L0.2880, L0.2881, L0.2885, L0.2886, L0.2890, L0.2891, L0.2895, L0.2896, L0.2900, L0.2901, L0.2905, L0.2906, L0.2910, L0.2911, L0.2915, L0.2916, L0.2920, L0.2921, L0.2925, L0.2926, L0.2930, L0.2931, L0.2935, L0.2936, L0.2940, L0.2941, L0.2945, L0.2946, L0.2950, L0.2951, L0.2955, L0.2956, L0.2960, L0.2961, L0.2965, L0.2966, L0.2970, L0.2971, L0.2975, L0.2976, L0.2980, L0.2981, L0.2985, L0.2986, L0.2990, L0.2991, L0.2995, L0.2996, L0.3000, L0.3001"
- - " Creating 2 files"
- - "**** Simulation run 420, type=compact(ManySmallFiles). 14 Input Files, 2mb total:"
- - "L0, all files 141kb "
- - "L0.3005[55,69] 1.01us |-----------------L0.3005-----------------| "
- - "L0.3006[70,84] 1.01us |-----------------L0.3006-----------------| "
- - "L0.3010[55,69] 1.01us |-----------------L0.3010-----------------| "
- - "L0.3011[70,84] 1.01us |-----------------L0.3011-----------------| "
- - "L0.3015[55,69] 1.02us |-----------------L0.3015-----------------| "
- - "L0.3016[70,84] 1.02us |-----------------L0.3016-----------------| "
- - "L0.3020[55,69] 1.02us |-----------------L0.3020-----------------| "
- - "L0.3021[70,84] 1.02us |-----------------L0.3021-----------------| "
- - "L0.3025[55,69] 1.02us |-----------------L0.3025-----------------| "
- - "L0.3026[70,84] 1.02us |-----------------L0.3026-----------------| "
- - "L0.3030[55,69] 1.02us |-----------------L0.3030-----------------| "
- - "L0.3031[70,84] 1.02us |-----------------L0.3031-----------------| "
- - "L0.3035[55,69] 1.02us |-----------------L0.3035-----------------| "
- - "L0.3036[70,84] 1.02us |-----------------L0.3036-----------------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0, all files 2mb "
- - "L0.?[55,84] 1.02us |------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 14 files: L0.3005, L0.3006, L0.3010, L0.3011, L0.3015, L0.3016, L0.3020, L0.3021, L0.3025, L0.3026, L0.3030, L0.3031, L0.3035, L0.3036"
- - " Creating 1 files"
- - "**** Simulation run 421, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[48]). 200 Input Files, 29mb total:"
- - "L0 "
- - "L0.1504[40,54] 714ns 141kb |----------------L0.1504-----------------|"
- - "L0.1508[24,39] 715ns 152kb|------------------L0.1508------------------| "
- - "L0.1509[40,54] 715ns 141kb |----------------L0.1509-----------------|"
- - "L0.1513[24,39] 716ns 152kb|------------------L0.1513------------------| "
- - "L0.1514[40,54] 716ns 141kb |----------------L0.1514-----------------|"
- - "L0.1518[24,39] 717ns 152kb|------------------L0.1518------------------| "
- - "L0.1519[40,54] 717ns 141kb |----------------L0.1519-----------------|"
- - "L0.1523[24,39] 718ns 152kb|------------------L0.1523------------------| "
- - "L0.1524[40,54] 718ns 141kb |----------------L0.1524-----------------|"
- - "L0.1528[24,39] 719ns 152kb|------------------L0.1528------------------| "
- - "L0.1529[40,54] 719ns 141kb |----------------L0.1529-----------------|"
- - "L0.1533[24,39] 720ns 152kb|------------------L0.1533------------------| "
- - "L0.1534[40,54] 720ns 141kb |----------------L0.1534-----------------|"
- - "L0.1538[24,39] 721ns 152kb|------------------L0.1538------------------| "
- - "L0.1539[40,54] 721ns 141kb |----------------L0.1539-----------------|"
- - "L0.1543[24,39] 722ns 152kb|------------------L0.1543------------------| "
- - "L0.1544[40,54] 722ns 141kb |----------------L0.1544-----------------|"
- - "L0.1548[24,39] 723ns 152kb|------------------L0.1548------------------| "
- - "L0.1549[40,54] 723ns 141kb |----------------L0.1549-----------------|"
- - "L0.1553[24,39] 724ns 152kb|------------------L0.1553------------------| "
- - "L0.1554[40,54] 724ns 141kb |----------------L0.1554-----------------|"
- - "L0.1558[24,39] 725ns 152kb|------------------L0.1558------------------| "
- - "L0.1559[40,54] 725ns 141kb |----------------L0.1559-----------------|"
- - "L0.1563[24,39] 726ns 152kb|------------------L0.1563------------------| "
- - "L0.1564[40,54] 726ns 141kb |----------------L0.1564-----------------|"
- - "L0.1568[24,39] 727ns 152kb|------------------L0.1568------------------| "
- - "L0.1569[40,54] 727ns 141kb |----------------L0.1569-----------------|"
- - "L0.1573[24,39] 728ns 152kb|------------------L0.1573------------------| "
- - "L0.1574[40,54] 728ns 141kb |----------------L0.1574-----------------|"
- - "L0.1578[24,39] 729ns 152kb|------------------L0.1578------------------| "
- - "L0.1579[40,54] 729ns 141kb |----------------L0.1579-----------------|"
- - "L0.1583[24,39] 730ns 152kb|------------------L0.1583------------------| "
- - "L0.1584[40,54] 730ns 141kb |----------------L0.1584-----------------|"
- - "L0.1588[24,39] 731ns 152kb|------------------L0.1588------------------| "
- - "L0.1589[40,54] 731ns 141kb |----------------L0.1589-----------------|"
- - "L0.1593[24,39] 732ns 152kb|------------------L0.1593------------------| "
- - "L0.1594[40,54] 732ns 141kb |----------------L0.1594-----------------|"
- - "L0.1598[24,39] 733ns 152kb|------------------L0.1598------------------| "
- - "L0.1599[40,54] 733ns 141kb |----------------L0.1599-----------------|"
- - "L0.1603[24,39] 734ns 152kb|------------------L0.1603------------------| "
- - "L0.1604[40,54] 734ns 141kb |----------------L0.1604-----------------|"
- - "L0.1608[24,39] 735ns 152kb|------------------L0.1608------------------| "
- - "L0.1609[40,54] 735ns 141kb |----------------L0.1609-----------------|"
- - "L0.1613[24,39] 736ns 152kb|------------------L0.1613------------------| "
- - "L0.1614[40,54] 736ns 141kb |----------------L0.1614-----------------|"
- - "L0.1618[24,39] 737ns 152kb|------------------L0.1618------------------| "
- - "L0.1619[40,54] 737ns 141kb |----------------L0.1619-----------------|"
- - "L0.1623[24,39] 738ns 152kb|------------------L0.1623------------------| "
- - "L0.1624[40,54] 738ns 141kb |----------------L0.1624-----------------|"
- - "L0.1628[24,39] 739ns 152kb|------------------L0.1628------------------| "
- - "L0.1629[40,54] 739ns 141kb |----------------L0.1629-----------------|"
- - "L0.1633[24,39] 740ns 152kb|------------------L0.1633------------------| "
- - "L0.1634[40,54] 740ns 141kb |----------------L0.1634-----------------|"
- - "L0.1638[24,39] 741ns 152kb|------------------L0.1638------------------| "
- - "L0.1639[40,54] 741ns 141kb |----------------L0.1639-----------------|"
- - "L0.1643[24,39] 742ns 152kb|------------------L0.1643------------------| "
- - "L0.1644[40,54] 742ns 141kb |----------------L0.1644-----------------|"
- - "L0.1648[24,39] 743ns 152kb|------------------L0.1648------------------| "
- - "L0.1649[40,54] 743ns 141kb |----------------L0.1649-----------------|"
- - "L0.1653[24,39] 744ns 152kb|------------------L0.1653------------------| "
- - "L0.1654[40,54] 744ns 141kb |----------------L0.1654-----------------|"
- - "L0.1658[24,39] 745ns 152kb|------------------L0.1658------------------| "
- - "L0.1659[40,54] 745ns 141kb |----------------L0.1659-----------------|"
- - "L0.1663[24,39] 746ns 152kb|------------------L0.1663------------------| "
- - "L0.1664[40,54] 746ns 141kb |----------------L0.1664-----------------|"
- - "L0.1668[24,39] 747ns 152kb|------------------L0.1668------------------| "
- - "L0.1669[40,54] 747ns 141kb |----------------L0.1669-----------------|"
- - "L0.1673[24,39] 748ns 152kb|------------------L0.1673------------------| "
- - "L0.1674[40,54] 748ns 141kb |----------------L0.1674-----------------|"
- - "L0.1678[24,39] 749ns 152kb|------------------L0.1678------------------| "
- - "L0.1679[40,54] 749ns 141kb |----------------L0.1679-----------------|"
- - "L0.1683[24,39] 750ns 152kb|------------------L0.1683------------------| "
- - "L0.1684[40,54] 750ns 141kb |----------------L0.1684-----------------|"
- - "L0.1688[24,39] 751ns 152kb|------------------L0.1688------------------| "
- - "L0.1689[40,54] 751ns 141kb |----------------L0.1689-----------------|"
- - "L0.1693[24,39] 752ns 152kb|------------------L0.1693------------------| "
- - "L0.1694[40,54] 752ns 141kb |----------------L0.1694-----------------|"
- - "L0.1698[24,39] 753ns 152kb|------------------L0.1698------------------| "
- - "L0.1699[40,54] 753ns 141kb |----------------L0.1699-----------------|"
- - "L0.1703[24,39] 754ns 152kb|------------------L0.1703------------------| "
- - "L0.1704[40,54] 754ns 141kb |----------------L0.1704-----------------|"
- - "L0.1708[24,39] 755ns 152kb|------------------L0.1708------------------| "
- - "L0.1709[40,54] 755ns 141kb |----------------L0.1709-----------------|"
- - "L0.1713[24,39] 756ns 152kb|------------------L0.1713------------------| "
- - "L0.1714[40,54] 756ns 141kb |----------------L0.1714-----------------|"
- - "L0.1718[24,39] 757ns 152kb|------------------L0.1718------------------| "
- - "L0.1719[40,54] 757ns 141kb |----------------L0.1719-----------------|"
- - "L0.1723[24,39] 758ns 152kb|------------------L0.1723------------------| "
- - "L0.1724[40,54] 758ns 141kb |----------------L0.1724-----------------|"
- - "L0.1728[24,39] 759ns 152kb|------------------L0.1728------------------| "
- - "L0.1729[40,54] 759ns 141kb |----------------L0.1729-----------------|"
- - "L0.1733[24,39] 760ns 152kb|------------------L0.1733------------------| "
- - "L0.1734[40,54] 760ns 141kb |----------------L0.1734-----------------|"
- - "L0.1738[24,39] 761ns 152kb|------------------L0.1738------------------| "
- - "L0.1739[40,54] 761ns 141kb |----------------L0.1739-----------------|"
- - "L0.1743[24,39] 762ns 152kb|------------------L0.1743------------------| "
- - "L0.1744[40,54] 762ns 141kb |----------------L0.1744-----------------|"
- - "L0.1748[24,39] 763ns 152kb|------------------L0.1748------------------| "
- - "L0.1749[40,54] 763ns 141kb |----------------L0.1749-----------------|"
- - "L0.1753[24,39] 764ns 152kb|------------------L0.1753------------------| "
- - "L0.1754[40,54] 764ns 141kb |----------------L0.1754-----------------|"
- - "L0.1758[24,39] 765ns 152kb|------------------L0.1758------------------| "
- - "L0.1759[40,54] 765ns 141kb |----------------L0.1759-----------------|"
- - "L0.1763[24,39] 766ns 152kb|------------------L0.1763------------------| "
- - "L0.1764[40,54] 766ns 141kb |----------------L0.1764-----------------|"
- - "L0.1768[24,39] 767ns 152kb|------------------L0.1768------------------| "
- - "L0.1769[40,54] 767ns 141kb |----------------L0.1769-----------------|"
- - "L0.1773[24,39] 768ns 152kb|------------------L0.1773------------------| "
- - "L0.1774[40,54] 768ns 141kb |----------------L0.1774-----------------|"
- - "L0.1778[24,39] 769ns 152kb|------------------L0.1778------------------| "
- - "L0.1779[40,54] 769ns 141kb |----------------L0.1779-----------------|"
- - "L0.1783[24,39] 770ns 152kb|------------------L0.1783------------------| "
- - "L0.1784[40,54] 770ns 141kb |----------------L0.1784-----------------|"
- - "L0.1788[24,39] 771ns 152kb|------------------L0.1788------------------| "
- - "L0.1789[40,54] 771ns 141kb |----------------L0.1789-----------------|"
- - "L0.1793[24,39] 772ns 152kb|------------------L0.1793------------------| "
- - "L0.1794[40,54] 772ns 141kb |----------------L0.1794-----------------|"
- - "L0.1798[24,39] 773ns 152kb|------------------L0.1798------------------| "
- - "L0.1799[40,54] 773ns 141kb |----------------L0.1799-----------------|"
- - "L0.1803[24,39] 774ns 152kb|------------------L0.1803------------------| "
- - "L0.1804[40,54] 774ns 141kb |----------------L0.1804-----------------|"
- - "L0.1808[24,39] 775ns 152kb|------------------L0.1808------------------| "
- - "L0.1809[40,54] 775ns 141kb |----------------L0.1809-----------------|"
- - "L0.1853[24,39] 776ns 152kb|------------------L0.1853------------------| "
- - "L0.1854[40,54] 776ns 141kb |----------------L0.1854-----------------|"
- - "L0.1858[24,39] 777ns 152kb|------------------L0.1858------------------| "
- - "L0.1859[40,54] 777ns 141kb |----------------L0.1859-----------------|"
- - "L0.1813[24,39] 778ns 152kb|------------------L0.1813------------------| "
- - "L0.1814[40,54] 778ns 141kb |----------------L0.1814-----------------|"
- - "L0.1818[24,39] 779ns 152kb|------------------L0.1818------------------| "
- - "L0.1819[40,54] 779ns 141kb |----------------L0.1819-----------------|"
- - "L0.1823[24,39] 780ns 152kb|------------------L0.1823------------------| "
- - "L0.1824[40,54] 780ns 141kb |----------------L0.1824-----------------|"
- - "L0.1828[24,39] 781ns 152kb|------------------L0.1828------------------| "
- - "L0.1829[40,54] 781ns 141kb |----------------L0.1829-----------------|"
- - "L0.1833[24,39] 782ns 152kb|------------------L0.1833------------------| "
- - "L0.1834[40,54] 782ns 141kb |----------------L0.1834-----------------|"
- - "L0.1838[24,39] 783ns 152kb|------------------L0.1838------------------| "
- - "L0.1839[40,54] 783ns 141kb |----------------L0.1839-----------------|"
- - "L0.1843[24,39] 784ns 152kb|------------------L0.1843------------------| "
- - "L0.1844[40,54] 784ns 141kb |----------------L0.1844-----------------|"
- - "L0.1848[24,39] 785ns 152kb|------------------L0.1848------------------| "
- - "L0.1849[40,54] 785ns 141kb |----------------L0.1849-----------------|"
- - "L0.1863[24,39] 786ns 152kb|------------------L0.1863------------------| "
- - "L0.1864[40,54] 786ns 141kb |----------------L0.1864-----------------|"
- - "L0.1868[24,39] 787ns 152kb|------------------L0.1868------------------| "
- - "L0.1869[40,54] 787ns 141kb |----------------L0.1869-----------------|"
- - "L0.1873[24,39] 788ns 152kb|------------------L0.1873------------------| "
- - "L0.1874[40,54] 788ns 141kb |----------------L0.1874-----------------|"
- - "L0.1878[24,39] 789ns 152kb|------------------L0.1878------------------| "
- - "L0.1879[40,54] 789ns 141kb |----------------L0.1879-----------------|"
- - "L0.1883[24,39] 790ns 152kb|------------------L0.1883------------------| "
- - "L0.1884[40,54] 790ns 141kb |----------------L0.1884-----------------|"
- - "L0.1888[24,39] 791ns 152kb|------------------L0.1888------------------| "
- - "L0.1889[40,54] 791ns 141kb |----------------L0.1889-----------------|"
- - "L0.1893[24,39] 792ns 152kb|------------------L0.1893------------------| "
- - "L0.1894[40,54] 792ns 141kb |----------------L0.1894-----------------|"
- - "L0.1898[24,39] 793ns 152kb|------------------L0.1898------------------| "
- - "L0.1899[40,54] 793ns 141kb |----------------L0.1899-----------------|"
- - "L0.1903[24,39] 794ns 152kb|------------------L0.1903------------------| "
- - "L0.1904[40,54] 794ns 141kb |----------------L0.1904-----------------|"
- - "L0.1908[24,39] 795ns 152kb|------------------L0.1908------------------| "
- - "L0.1909[40,54] 795ns 141kb |----------------L0.1909-----------------|"
- - "L0.1913[24,39] 796ns 152kb|------------------L0.1913------------------| "
- - "L0.1914[40,54] 796ns 141kb |----------------L0.1914-----------------|"
- - "L0.1918[24,39] 797ns 152kb|------------------L0.1918------------------| "
- - "L0.1919[40,54] 797ns 141kb |----------------L0.1919-----------------|"
- - "L0.1923[24,39] 798ns 152kb|------------------L0.1923------------------| "
- - "L0.1924[40,54] 798ns 141kb |----------------L0.1924-----------------|"
- - "L0.1928[24,39] 799ns 152kb|------------------L0.1928------------------| "
- - "L0.1929[40,54] 799ns 141kb |----------------L0.1929-----------------|"
- - "L0.1933[24,39] 800ns 152kb|------------------L0.1933------------------| "
- - "L0.1934[40,54] 800ns 141kb |----------------L0.1934-----------------|"
- - "L0.1938[24,39] 801ns 152kb|------------------L0.1938------------------| "
- - "L0.1939[40,54] 801ns 141kb |----------------L0.1939-----------------|"
- - "L0.1943[24,39] 802ns 152kb|------------------L0.1943------------------| "
- - "L0.1944[40,54] 802ns 141kb |----------------L0.1944-----------------|"
- - "L0.1948[24,39] 803ns 152kb|------------------L0.1948------------------| "
- - "L0.1949[40,54] 803ns 141kb |----------------L0.1949-----------------|"
- - "L0.1953[24,39] 804ns 152kb|------------------L0.1953------------------| "
- - "L0.1954[40,54] 804ns 141kb |----------------L0.1954-----------------|"
- - "L0.1958[24,39] 805ns 152kb|------------------L0.1958------------------| "
- - "L0.1959[40,54] 805ns 141kb |----------------L0.1959-----------------|"
- - "L0.1963[24,39] 806ns 152kb|------------------L0.1963------------------| "
- - "L0.1964[40,54] 806ns 141kb |----------------L0.1964-----------------|"
- - "L0.1968[24,39] 807ns 152kb|------------------L0.1968------------------| "
- - "L0.1969[40,54] 807ns 141kb |----------------L0.1969-----------------|"
- - "L0.1973[24,39] 808ns 152kb|------------------L0.1973------------------| "
- - "L0.1974[40,54] 808ns 141kb |----------------L0.1974-----------------|"
- - "L0.1978[24,39] 809ns 152kb|------------------L0.1978------------------| "
- - "L0.1979[40,54] 809ns 141kb |----------------L0.1979-----------------|"
- - "L0.1983[24,39] 810ns 152kb|------------------L0.1983------------------| "
- - "L0.1984[40,54] 810ns 141kb |----------------L0.1984-----------------|"
- - "L0.1988[24,39] 811ns 152kb|------------------L0.1988------------------| "
- - "L0.1989[40,54] 811ns 141kb |----------------L0.1989-----------------|"
- - "L0.1993[24,39] 812ns 152kb|------------------L0.1993------------------| "
- - "L0.1994[40,54] 812ns 141kb |----------------L0.1994-----------------|"
- - "L0.1998[24,39] 813ns 152kb|------------------L0.1998------------------| "
- - "L0.1999[40,54] 813ns 141kb |----------------L0.1999-----------------|"
- - "L0.2003[24,39] 814ns 152kb|------------------L0.2003------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 29mb total:"
- - "L0 "
- - "L0.?[24,48] 814ns 23mb |---------------------------------L0.?---------------------------------| "
- - "L0.?[49,54] 814ns 6mb |----L0.?-----|"
- - "Committing partition 1:"
- - " Soft Deleting 200 files: L0.1504, L0.1508, L0.1509, L0.1513, L0.1514, L0.1518, L0.1519, L0.1523, L0.1524, L0.1528, L0.1529, L0.1533, L0.1534, L0.1538, L0.1539, L0.1543, L0.1544, L0.1548, L0.1549, L0.1553, L0.1554, L0.1558, L0.1559, L0.1563, L0.1564, L0.1568, L0.1569, L0.1573, L0.1574, L0.1578, L0.1579, L0.1583, L0.1584, L0.1588, L0.1589, L0.1593, L0.1594, L0.1598, L0.1599, L0.1603, L0.1604, L0.1608, L0.1609, L0.1613, L0.1614, L0.1618, L0.1619, L0.1623, L0.1624, L0.1628, L0.1629, L0.1633, L0.1634, L0.1638, L0.1639, L0.1643, L0.1644, L0.1648, L0.1649, L0.1653, L0.1654, L0.1658, L0.1659, L0.1663, L0.1664, L0.1668, L0.1669, L0.1673, L0.1674, L0.1678, L0.1679, L0.1683, L0.1684, L0.1688, L0.1689, L0.1693, L0.1694, L0.1698, L0.1699, L0.1703, L0.1704, L0.1708, L0.1709, L0.1713, L0.1714, L0.1718, L0.1719, L0.1723, L0.1724, L0.1728, L0.1729, L0.1733, L0.1734, L0.1738, L0.1739, L0.1743, L0.1744, L0.1748, L0.1749, L0.1753, L0.1754, L0.1758, L0.1759, L0.1763, L0.1764, L0.1768, L0.1769, L0.1773, L0.1774, L0.1778, L0.1779, L0.1783, L0.1784, L0.1788, L0.1789, L0.1793, L0.1794, L0.1798, L0.1799, L0.1803, L0.1804, L0.1808, L0.1809, L0.1813, L0.1814, L0.1818, L0.1819, L0.1823, L0.1824, L0.1828, L0.1829, L0.1833, L0.1834, L0.1838, L0.1839, L0.1843, L0.1844, L0.1848, L0.1849, L0.1853, L0.1854, L0.1858, L0.1859, L0.1863, L0.1864, L0.1868, L0.1869, L0.1873, L0.1874, L0.1878, L0.1879, L0.1883, L0.1884, L0.1888, L0.1889, L0.1893, L0.1894, L0.1898, L0.1899, L0.1903, L0.1904, L0.1908, L0.1909, L0.1913, L0.1914, L0.1918, L0.1919, L0.1923, L0.1924, L0.1928, L0.1929, L0.1933, L0.1934, L0.1938, L0.1939, L0.1943, L0.1944, L0.1948, L0.1949, L0.1953, L0.1954, L0.1958, L0.1959, L0.1963, L0.1964, L0.1968, L0.1969, L0.1973, L0.1974, L0.1978, L0.1979, L0.1983, L0.1984, L0.1988, L0.1989, L0.1993, L0.1994, L0.1998, L0.1999, L0.2003"
- - " Creating 2 files"
- - "**** Simulation run 422, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[96]). 200 Input Files, 149mb total:"
- - "L0 "
- - "L0.3043[85,96] 615ns 70mb|----------------------------L0.3043-----------------------------| "
- - "L0.1010[97,100] 615ns 41mb |----L0.1010-----|"
- - "L0.1017[85,100] 616ns 192kb|----------------------------------------L0.1017-----------------------------------------|"
- - "L0.1022[85,100] 617ns 192kb|----------------------------------------L0.1022-----------------------------------------|"
- - "L0.1027[85,100] 618ns 192kb|----------------------------------------L0.1027-----------------------------------------|"
- - "L0.1032[85,100] 619ns 192kb|----------------------------------------L0.1032-----------------------------------------|"
- - "L0.1037[85,100] 620ns 192kb|----------------------------------------L0.1037-----------------------------------------|"
- - "L0.1042[85,100] 621ns 192kb|----------------------------------------L0.1042-----------------------------------------|"
- - "L0.1047[85,100] 622ns 192kb|----------------------------------------L0.1047-----------------------------------------|"
- - "L0.1052[85,100] 623ns 192kb|----------------------------------------L0.1052-----------------------------------------|"
- - "L0.1057[85,100] 624ns 192kb|----------------------------------------L0.1057-----------------------------------------|"
- - "L0.1062[85,100] 625ns 192kb|----------------------------------------L0.1062-----------------------------------------|"
- - "L0.1067[85,100] 626ns 192kb|----------------------------------------L0.1067-----------------------------------------|"
- - "L0.1072[85,100] 627ns 192kb|----------------------------------------L0.1072-----------------------------------------|"
- - "L0.1077[85,100] 628ns 192kb|----------------------------------------L0.1077-----------------------------------------|"
- - "L0.1082[85,100] 629ns 192kb|----------------------------------------L0.1082-----------------------------------------|"
- - "L0.1087[85,100] 630ns 192kb|----------------------------------------L0.1087-----------------------------------------|"
- - "L0.1092[85,100] 631ns 192kb|----------------------------------------L0.1092-----------------------------------------|"
- - "L0.1097[85,100] 632ns 192kb|----------------------------------------L0.1097-----------------------------------------|"
- - "L0.1102[85,100] 633ns 192kb|----------------------------------------L0.1102-----------------------------------------|"
- - "L0.1107[85,100] 634ns 192kb|----------------------------------------L0.1107-----------------------------------------|"
- - "L0.1112[85,100] 635ns 192kb|----------------------------------------L0.1112-----------------------------------------|"
- - "L0.1117[85,100] 636ns 192kb|----------------------------------------L0.1117-----------------------------------------|"
- - "L0.1122[85,100] 637ns 192kb|----------------------------------------L0.1122-----------------------------------------|"
- - "L0.1127[85,100] 638ns 192kb|----------------------------------------L0.1127-----------------------------------------|"
- - "L0.1132[85,100] 639ns 192kb|----------------------------------------L0.1132-----------------------------------------|"
- - "L0.1137[85,100] 640ns 192kb|----------------------------------------L0.1137-----------------------------------------|"
- - "L0.1142[85,100] 641ns 192kb|----------------------------------------L0.1142-----------------------------------------|"
- - "L0.1147[85,100] 642ns 192kb|----------------------------------------L0.1147-----------------------------------------|"
- - "L0.1152[85,100] 643ns 192kb|----------------------------------------L0.1152-----------------------------------------|"
- - "L0.1157[85,100] 644ns 192kb|----------------------------------------L0.1157-----------------------------------------|"
- - "L0.1162[85,100] 645ns 192kb|----------------------------------------L0.1162-----------------------------------------|"
- - "L0.1167[85,100] 646ns 192kb|----------------------------------------L0.1167-----------------------------------------|"
- - "L0.1172[85,100] 647ns 192kb|----------------------------------------L0.1172-----------------------------------------|"
- - "L0.1217[85,100] 648ns 192kb|----------------------------------------L0.1217-----------------------------------------|"
- - "L0.1222[85,100] 649ns 192kb|----------------------------------------L0.1222-----------------------------------------|"
- - "L0.1177[85,100] 650ns 192kb|----------------------------------------L0.1177-----------------------------------------|"
- - "L0.1182[85,100] 651ns 192kb|----------------------------------------L0.1182-----------------------------------------|"
- - "L0.1187[85,100] 652ns 192kb|----------------------------------------L0.1187-----------------------------------------|"
- - "L0.1192[85,100] 653ns 192kb|----------------------------------------L0.1192-----------------------------------------|"
- - "L0.1197[85,100] 654ns 192kb|----------------------------------------L0.1197-----------------------------------------|"
- - "L0.1202[85,100] 655ns 192kb|----------------------------------------L0.1202-----------------------------------------|"
- - "L0.1207[85,100] 656ns 192kb|----------------------------------------L0.1207-----------------------------------------|"
- - "L0.1212[85,100] 657ns 192kb|----------------------------------------L0.1212-----------------------------------------|"
- - "L0.1227[85,100] 658ns 192kb|----------------------------------------L0.1227-----------------------------------------|"
- - "L0.1232[85,100] 659ns 192kb|----------------------------------------L0.1232-----------------------------------------|"
- - "L0.1237[85,100] 660ns 192kb|----------------------------------------L0.1237-----------------------------------------|"
- - "L0.1242[85,100] 661ns 192kb|----------------------------------------L0.1242-----------------------------------------|"
- - "L0.1247[85,100] 662ns 192kb|----------------------------------------L0.1247-----------------------------------------|"
- - "L0.1252[85,100] 663ns 192kb|----------------------------------------L0.1252-----------------------------------------|"
- - "L0.1257[85,100] 664ns 192kb|----------------------------------------L0.1257-----------------------------------------|"
- - "L0.1262[85,100] 665ns 192kb|----------------------------------------L0.1262-----------------------------------------|"
- - "L0.1267[85,100] 666ns 192kb|----------------------------------------L0.1267-----------------------------------------|"
- - "L0.1272[85,100] 667ns 192kb|----------------------------------------L0.1272-----------------------------------------|"
- - "L0.1277[85,100] 668ns 192kb|----------------------------------------L0.1277-----------------------------------------|"
- - "L0.1282[85,100] 669ns 192kb|----------------------------------------L0.1282-----------------------------------------|"
- - "L0.1287[85,100] 670ns 192kb|----------------------------------------L0.1287-----------------------------------------|"
- - "L0.1292[85,100] 671ns 192kb|----------------------------------------L0.1292-----------------------------------------|"
- - "L0.1297[85,100] 672ns 192kb|----------------------------------------L0.1297-----------------------------------------|"
- - "L0.1302[85,100] 673ns 192kb|----------------------------------------L0.1302-----------------------------------------|"
- - "L0.1307[85,100] 674ns 192kb|----------------------------------------L0.1307-----------------------------------------|"
- - "L0.1312[85,100] 675ns 192kb|----------------------------------------L0.1312-----------------------------------------|"
- - "L0.1317[85,100] 676ns 192kb|----------------------------------------L0.1317-----------------------------------------|"
- - "L0.1322[85,100] 677ns 192kb|----------------------------------------L0.1322-----------------------------------------|"
- - "L0.1327[85,100] 678ns 192kb|----------------------------------------L0.1327-----------------------------------------|"
- - "L0.1332[85,100] 679ns 192kb|----------------------------------------L0.1332-----------------------------------------|"
- - "L0.1337[85,100] 680ns 192kb|----------------------------------------L0.1337-----------------------------------------|"
- - "L0.1342[85,100] 681ns 192kb|----------------------------------------L0.1342-----------------------------------------|"
- - "L0.1347[85,100] 682ns 192kb|----------------------------------------L0.1347-----------------------------------------|"
- - "L0.1352[85,100] 683ns 192kb|----------------------------------------L0.1352-----------------------------------------|"
- - "L0.1357[85,100] 684ns 192kb|----------------------------------------L0.1357-----------------------------------------|"
- - "L0.1362[85,100] 685ns 192kb|----------------------------------------L0.1362-----------------------------------------|"
- - "L0.1367[85,100] 686ns 192kb|----------------------------------------L0.1367-----------------------------------------|"
- - "L0.1372[85,100] 687ns 192kb|----------------------------------------L0.1372-----------------------------------------|"
- - "L0.1377[85,100] 688ns 192kb|----------------------------------------L0.1377-----------------------------------------|"
- - "L0.1382[85,100] 689ns 192kb|----------------------------------------L0.1382-----------------------------------------|"
- - "L0.1387[85,100] 690ns 192kb|----------------------------------------L0.1387-----------------------------------------|"
- - "L0.1392[85,100] 691ns 192kb|----------------------------------------L0.1392-----------------------------------------|"
- - "L0.1397[85,100] 692ns 192kb|----------------------------------------L0.1397-----------------------------------------|"
- - "L0.1402[85,100] 693ns 192kb|----------------------------------------L0.1402-----------------------------------------|"
- - "L0.1407[85,100] 694ns 192kb|----------------------------------------L0.1407-----------------------------------------|"
- - "L0.1412[85,100] 695ns 192kb|----------------------------------------L0.1412-----------------------------------------|"
- - "L0.1417[85,100] 696ns 192kb|----------------------------------------L0.1417-----------------------------------------|"
- - "L0.1422[85,100] 697ns 192kb|----------------------------------------L0.1422-----------------------------------------|"
- - "L0.1427[85,100] 698ns 192kb|----------------------------------------L0.1427-----------------------------------------|"
- - "L0.1432[85,100] 699ns 192kb|----------------------------------------L0.1432-----------------------------------------|"
- - "L0.1437[85,100] 700ns 192kb|----------------------------------------L0.1437-----------------------------------------|"
- - "L0.1442[85,100] 701ns 192kb|----------------------------------------L0.1442-----------------------------------------|"
- - "L0.1447[85,100] 702ns 192kb|----------------------------------------L0.1447-----------------------------------------|"
- - "L0.1452[85,100] 703ns 192kb|----------------------------------------L0.1452-----------------------------------------|"
- - "L0.1457[85,100] 704ns 192kb|----------------------------------------L0.1457-----------------------------------------|"
- - "L0.1462[85,100] 705ns 192kb|----------------------------------------L0.1462-----------------------------------------|"
- - "L0.1467[85,100] 706ns 192kb|----------------------------------------L0.1467-----------------------------------------|"
- - "L0.1472[85,100] 707ns 192kb|----------------------------------------L0.1472-----------------------------------------|"
- - "L0.1477[85,100] 708ns 192kb|----------------------------------------L0.1477-----------------------------------------|"
- - "L0.1482[85,100] 709ns 192kb|----------------------------------------L0.1482-----------------------------------------|"
- - "L0.1487[85,100] 710ns 192kb|----------------------------------------L0.1487-----------------------------------------|"
- - "L0.1492[85,100] 711ns 192kb|----------------------------------------L0.1492-----------------------------------------|"
- - "L0.1497[85,100] 712ns 192kb|----------------------------------------L0.1497-----------------------------------------|"
- - "L0.1502[85,100] 713ns 192kb|----------------------------------------L0.1502-----------------------------------------|"
- - "L0.1507[85,100] 714ns 192kb|----------------------------------------L0.1507-----------------------------------------|"
- - "L0.1512[85,100] 715ns 192kb|----------------------------------------L0.1512-----------------------------------------|"
- - "L0.1517[85,100] 716ns 192kb|----------------------------------------L0.1517-----------------------------------------|"
- - "L0.1522[85,100] 717ns 192kb|----------------------------------------L0.1522-----------------------------------------|"
- - "L0.1527[85,100] 718ns 192kb|----------------------------------------L0.1527-----------------------------------------|"
- - "L0.1532[85,100] 719ns 192kb|----------------------------------------L0.1532-----------------------------------------|"
- - "L0.1537[85,100] 720ns 192kb|----------------------------------------L0.1537-----------------------------------------|"
- - "L0.1542[85,100] 721ns 192kb|----------------------------------------L0.1542-----------------------------------------|"
- - "L0.1547[85,100] 722ns 192kb|----------------------------------------L0.1547-----------------------------------------|"
- - "L0.1552[85,100] 723ns 192kb|----------------------------------------L0.1552-----------------------------------------|"
- - "L0.1557[85,100] 724ns 192kb|----------------------------------------L0.1557-----------------------------------------|"
- - "L0.1562[85,100] 725ns 192kb|----------------------------------------L0.1562-----------------------------------------|"
- - "L0.1567[85,100] 726ns 192kb|----------------------------------------L0.1567-----------------------------------------|"
- - "L0.1572[85,100] 727ns 192kb|----------------------------------------L0.1572-----------------------------------------|"
- - "L0.1577[85,100] 728ns 192kb|----------------------------------------L0.1577-----------------------------------------|"
- - "L0.1582[85,100] 729ns 192kb|----------------------------------------L0.1582-----------------------------------------|"
- - "L0.1587[85,100] 730ns 192kb|----------------------------------------L0.1587-----------------------------------------|"
- - "L0.1592[85,100] 731ns 192kb|----------------------------------------L0.1592-----------------------------------------|"
- - "L0.1597[85,100] 732ns 192kb|----------------------------------------L0.1597-----------------------------------------|"
- - "L0.1602[85,100] 733ns 192kb|----------------------------------------L0.1602-----------------------------------------|"
- - "L0.1607[85,100] 734ns 192kb|----------------------------------------L0.1607-----------------------------------------|"
- - "L0.1612[85,100] 735ns 192kb|----------------------------------------L0.1612-----------------------------------------|"
- - "L0.1617[85,100] 736ns 192kb|----------------------------------------L0.1617-----------------------------------------|"
- - "L0.1622[85,100] 737ns 192kb|----------------------------------------L0.1622-----------------------------------------|"
- - "L0.1627[85,100] 738ns 192kb|----------------------------------------L0.1627-----------------------------------------|"
- - "L0.1632[85,100] 739ns 192kb|----------------------------------------L0.1632-----------------------------------------|"
- - "L0.1637[85,100] 740ns 192kb|----------------------------------------L0.1637-----------------------------------------|"
- - "L0.1642[85,100] 741ns 192kb|----------------------------------------L0.1642-----------------------------------------|"
- - "L0.1647[85,100] 742ns 192kb|----------------------------------------L0.1647-----------------------------------------|"
- - "L0.1652[85,100] 743ns 192kb|----------------------------------------L0.1652-----------------------------------------|"
- - "L0.1657[85,100] 744ns 192kb|----------------------------------------L0.1657-----------------------------------------|"
- - "L0.1662[85,100] 745ns 192kb|----------------------------------------L0.1662-----------------------------------------|"
- - "L0.1667[85,100] 746ns 192kb|----------------------------------------L0.1667-----------------------------------------|"
- - "L0.1672[85,100] 747ns 192kb|----------------------------------------L0.1672-----------------------------------------|"
- - "L0.1677[85,100] 748ns 192kb|----------------------------------------L0.1677-----------------------------------------|"
- - "L0.1682[85,100] 749ns 192kb|----------------------------------------L0.1682-----------------------------------------|"
- - "L0.1687[85,100] 750ns 192kb|----------------------------------------L0.1687-----------------------------------------|"
- - "L0.1692[85,100] 751ns 192kb|----------------------------------------L0.1692-----------------------------------------|"
- - "L0.1697[85,100] 752ns 192kb|----------------------------------------L0.1697-----------------------------------------|"
- - "L0.1702[85,100] 753ns 192kb|----------------------------------------L0.1702-----------------------------------------|"
- - "L0.1707[85,100] 754ns 192kb|----------------------------------------L0.1707-----------------------------------------|"
- - "L0.1712[85,100] 755ns 192kb|----------------------------------------L0.1712-----------------------------------------|"
- - "L0.1717[85,100] 756ns 192kb|----------------------------------------L0.1717-----------------------------------------|"
- - "L0.1722[85,100] 757ns 192kb|----------------------------------------L0.1722-----------------------------------------|"
- - "L0.1727[85,100] 758ns 192kb|----------------------------------------L0.1727-----------------------------------------|"
- - "L0.1732[85,100] 759ns 192kb|----------------------------------------L0.1732-----------------------------------------|"
- - "L0.1737[85,100] 760ns 192kb|----------------------------------------L0.1737-----------------------------------------|"
- - "L0.1742[85,100] 761ns 192kb|----------------------------------------L0.1742-----------------------------------------|"
- - "L0.1747[85,100] 762ns 192kb|----------------------------------------L0.1747-----------------------------------------|"
- - "L0.1752[85,100] 763ns 192kb|----------------------------------------L0.1752-----------------------------------------|"
- - "L0.1757[85,100] 764ns 192kb|----------------------------------------L0.1757-----------------------------------------|"
- - "L0.1762[85,100] 765ns 192kb|----------------------------------------L0.1762-----------------------------------------|"
- - "L0.1767[85,100] 766ns 192kb|----------------------------------------L0.1767-----------------------------------------|"
- - "L0.1772[85,100] 767ns 192kb|----------------------------------------L0.1772-----------------------------------------|"
- - "L0.1777[85,100] 768ns 192kb|----------------------------------------L0.1777-----------------------------------------|"
- - "L0.1782[85,100] 769ns 192kb|----------------------------------------L0.1782-----------------------------------------|"
- - "L0.1787[85,100] 770ns 192kb|----------------------------------------L0.1787-----------------------------------------|"
- - "L0.1792[85,100] 771ns 192kb|----------------------------------------L0.1792-----------------------------------------|"
- - "L0.1797[85,100] 772ns 192kb|----------------------------------------L0.1797-----------------------------------------|"
- - "L0.1802[85,100] 773ns 192kb|----------------------------------------L0.1802-----------------------------------------|"
- - "L0.1807[85,100] 774ns 192kb|----------------------------------------L0.1807-----------------------------------------|"
- - "L0.1812[85,100] 775ns 192kb|----------------------------------------L0.1812-----------------------------------------|"
- - "L0.1857[85,100] 776ns 192kb|----------------------------------------L0.1857-----------------------------------------|"
- - "L0.1862[85,100] 777ns 192kb|----------------------------------------L0.1862-----------------------------------------|"
- - "L0.1817[85,100] 778ns 192kb|----------------------------------------L0.1817-----------------------------------------|"
- - "L0.1822[85,100] 779ns 192kb|----------------------------------------L0.1822-----------------------------------------|"
- - "L0.1827[85,100] 780ns 192kb|----------------------------------------L0.1827-----------------------------------------|"
- - "L0.1832[85,100] 781ns 192kb|----------------------------------------L0.1832-----------------------------------------|"
- - "L0.1837[85,100] 782ns 192kb|----------------------------------------L0.1837-----------------------------------------|"
- - "L0.1842[85,100] 783ns 192kb|----------------------------------------L0.1842-----------------------------------------|"
- - "L0.1847[85,100] 784ns 192kb|----------------------------------------L0.1847-----------------------------------------|"
- - "L0.1852[85,100] 785ns 192kb|----------------------------------------L0.1852-----------------------------------------|"
- - "L0.1867[85,100] 786ns 192kb|----------------------------------------L0.1867-----------------------------------------|"
- - "L0.1872[85,100] 787ns 192kb|----------------------------------------L0.1872-----------------------------------------|"
- - "L0.1877[85,100] 788ns 192kb|----------------------------------------L0.1877-----------------------------------------|"
- - "L0.1882[85,100] 789ns 192kb|----------------------------------------L0.1882-----------------------------------------|"
- - "L0.1887[85,100] 790ns 192kb|----------------------------------------L0.1887-----------------------------------------|"
- - "L0.1892[85,100] 791ns 192kb|----------------------------------------L0.1892-----------------------------------------|"
- - "L0.1897[85,100] 792ns 192kb|----------------------------------------L0.1897-----------------------------------------|"
- - "L0.1902[85,100] 793ns 192kb|----------------------------------------L0.1902-----------------------------------------|"
- - "L0.1907[85,100] 794ns 192kb|----------------------------------------L0.1907-----------------------------------------|"
- - "L0.1912[85,100] 795ns 192kb|----------------------------------------L0.1912-----------------------------------------|"
- - "L0.1917[85,100] 796ns 192kb|----------------------------------------L0.1917-----------------------------------------|"
- - "L0.1922[85,100] 797ns 192kb|----------------------------------------L0.1922-----------------------------------------|"
- - "L0.1927[85,100] 798ns 192kb|----------------------------------------L0.1927-----------------------------------------|"
- - "L0.1932[85,100] 799ns 192kb|----------------------------------------L0.1932-----------------------------------------|"
- - "L0.1937[85,100] 800ns 192kb|----------------------------------------L0.1937-----------------------------------------|"
- - "L0.1942[85,100] 801ns 192kb|----------------------------------------L0.1942-----------------------------------------|"
- - "L0.1947[85,100] 802ns 192kb|----------------------------------------L0.1947-----------------------------------------|"
- - "L0.1952[85,100] 803ns 192kb|----------------------------------------L0.1952-----------------------------------------|"
- - "L0.1957[85,100] 804ns 192kb|----------------------------------------L0.1957-----------------------------------------|"
- - "L0.1962[85,100] 805ns 192kb|----------------------------------------L0.1962-----------------------------------------|"
- - "L0.1967[85,100] 806ns 192kb|----------------------------------------L0.1967-----------------------------------------|"
- - "L0.1972[85,100] 807ns 192kb|----------------------------------------L0.1972-----------------------------------------|"
- - "L0.1977[85,100] 808ns 192kb|----------------------------------------L0.1977-----------------------------------------|"
- - "L0.1982[85,100] 809ns 192kb|----------------------------------------L0.1982-----------------------------------------|"
- - "L0.1987[85,100] 810ns 192kb|----------------------------------------L0.1987-----------------------------------------|"
- - "L0.1992[85,100] 811ns 192kb|----------------------------------------L0.1992-----------------------------------------|"
- - "L0.1997[85,100] 812ns 192kb|----------------------------------------L0.1997-----------------------------------------|"
- - "L0.2002[85,100] 813ns 192kb|----------------------------------------L0.2002-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 149mb total:"
- - "L0 "
- - "L0.?[85,96] 813ns 109mb |------------------------------L0.?------------------------------| "
- - "L0.?[97,100] 813ns 40mb |------L0.?------|"
- - "Committing partition 1:"
- - " Soft Deleting 200 files: L0.1010, L0.1017, L0.1022, L0.1027, L0.1032, L0.1037, L0.1042, L0.1047, L0.1052, L0.1057, L0.1062, L0.1067, L0.1072, L0.1077, L0.1082, L0.1087, L0.1092, L0.1097, L0.1102, L0.1107, L0.1112, L0.1117, L0.1122, L0.1127, L0.1132, L0.1137, L0.1142, L0.1147, L0.1152, L0.1157, L0.1162, L0.1167, L0.1172, L0.1177, L0.1182, L0.1187, L0.1192, L0.1197, L0.1202, L0.1207, L0.1212, L0.1217, L0.1222, L0.1227, L0.1232, L0.1237, L0.1242, L0.1247, L0.1252, L0.1257, L0.1262, L0.1267, L0.1272, L0.1277, L0.1282, L0.1287, L0.1292, L0.1297, L0.1302, L0.1307, L0.1312, L0.1317, L0.1322, L0.1327, L0.1332, L0.1337, L0.1342, L0.1347, L0.1352, L0.1357, L0.1362, L0.1367, L0.1372, L0.1377, L0.1382, L0.1387, L0.1392, L0.1397, L0.1402, L0.1407, L0.1412, L0.1417, L0.1422, L0.1427, L0.1432, L0.1437, L0.1442, L0.1447, L0.1452, L0.1457, L0.1462, L0.1467, L0.1472, L0.1477, L0.1482, L0.1487, L0.1492, L0.1497, L0.1502, L0.1507, L0.1512, L0.1517, L0.1522, L0.1527, L0.1532, L0.1537, L0.1542, L0.1547, L0.1552, L0.1557, L0.1562, L0.1567, L0.1572, L0.1577, L0.1582, L0.1587, L0.1592, L0.1597, L0.1602, L0.1607, L0.1612, L0.1617, L0.1622, L0.1627, L0.1632, L0.1637, L0.1642, L0.1647, L0.1652, L0.1657, L0.1662, L0.1667, L0.1672, L0.1677, L0.1682, L0.1687, L0.1692, L0.1697, L0.1702, L0.1707, L0.1712, L0.1717, L0.1722, L0.1727, L0.1732, L0.1737, L0.1742, L0.1747, L0.1752, L0.1757, L0.1762, L0.1767, L0.1772, L0.1777, L0.1782, L0.1787, L0.1792, L0.1797, L0.1802, L0.1807, L0.1812, L0.1817, L0.1822, L0.1827, L0.1832, L0.1837, L0.1842, L0.1847, L0.1852, L0.1857, L0.1862, L0.1867, L0.1872, L0.1877, L0.1882, L0.1887, L0.1892, L0.1897, L0.1902, L0.1907, L0.1912, L0.1917, L0.1922, L0.1927, L0.1932, L0.1937, L0.1942, L0.1947, L0.1952, L0.1957, L0.1962, L0.1967, L0.1972, L0.1977, L0.1982, L0.1987, L0.1992, L0.1997, L0.2002, L0.3043"
- - " Creating 2 files"
- - "**** Simulation run 423, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[97]). 200 Input Files, 38mb total:"
- - "L0, all files 192kb "
- - "L0.2007[85,100] 814ns |----------------------------------------L0.2007-----------------------------------------|"
- - "L0.2012[85,100] 815ns |----------------------------------------L0.2012-----------------------------------------|"
- - "L0.2017[85,100] 816ns |----------------------------------------L0.2017-----------------------------------------|"
- - "L0.2022[85,100] 817ns |----------------------------------------L0.2022-----------------------------------------|"
- - "L0.2027[85,100] 818ns |----------------------------------------L0.2027-----------------------------------------|"
- - "L0.2032[85,100] 819ns |----------------------------------------L0.2032-----------------------------------------|"
- - "L0.2037[85,100] 820ns |----------------------------------------L0.2037-----------------------------------------|"
- - "L0.2042[85,100] 821ns |----------------------------------------L0.2042-----------------------------------------|"
- - "L0.2047[85,100] 822ns |----------------------------------------L0.2047-----------------------------------------|"
- - "L0.2052[85,100] 823ns |----------------------------------------L0.2052-----------------------------------------|"
- - "L0.2057[85,100] 824ns |----------------------------------------L0.2057-----------------------------------------|"
- - "L0.2062[85,100] 825ns |----------------------------------------L0.2062-----------------------------------------|"
- - "L0.2067[85,100] 826ns |----------------------------------------L0.2067-----------------------------------------|"
- - "L0.2072[85,100] 827ns |----------------------------------------L0.2072-----------------------------------------|"
- - "L0.2077[85,100] 828ns |----------------------------------------L0.2077-----------------------------------------|"
- - "L0.2082[85,100] 829ns |----------------------------------------L0.2082-----------------------------------------|"
- - "L0.2087[85,100] 830ns |----------------------------------------L0.2087-----------------------------------------|"
- - "L0.2092[85,100] 831ns |----------------------------------------L0.2092-----------------------------------------|"
- - "L0.2097[85,100] 832ns |----------------------------------------L0.2097-----------------------------------------|"
- - "L0.2102[85,100] 833ns |----------------------------------------L0.2102-----------------------------------------|"
- - "L0.2107[85,100] 834ns |----------------------------------------L0.2107-----------------------------------------|"
- - "L0.2112[85,100] 835ns |----------------------------------------L0.2112-----------------------------------------|"
- - "L0.2117[85,100] 836ns |----------------------------------------L0.2117-----------------------------------------|"
- - "L0.2122[85,100] 837ns |----------------------------------------L0.2122-----------------------------------------|"
- - "L0.2127[85,100] 838ns |----------------------------------------L0.2127-----------------------------------------|"
- - "L0.2132[85,100] 839ns |----------------------------------------L0.2132-----------------------------------------|"
- - "L0.2137[85,100] 840ns |----------------------------------------L0.2137-----------------------------------------|"
- - "L0.2142[85,100] 841ns |----------------------------------------L0.2142-----------------------------------------|"
- - "L0.2147[85,100] 842ns |----------------------------------------L0.2147-----------------------------------------|"
- - "L0.2152[85,100] 843ns |----------------------------------------L0.2152-----------------------------------------|"
- - "L0.2157[85,100] 844ns |----------------------------------------L0.2157-----------------------------------------|"
- - "L0.2162[85,100] 845ns |----------------------------------------L0.2162-----------------------------------------|"
- - "L0.2167[85,100] 846ns |----------------------------------------L0.2167-----------------------------------------|"
- - "L0.2172[85,100] 847ns |----------------------------------------L0.2172-----------------------------------------|"
- - "L0.2177[85,100] 848ns |----------------------------------------L0.2177-----------------------------------------|"
- - "L0.2182[85,100] 849ns |----------------------------------------L0.2182-----------------------------------------|"
- - "L0.2187[85,100] 850ns |----------------------------------------L0.2187-----------------------------------------|"
- - "L0.2192[85,100] 851ns |----------------------------------------L0.2192-----------------------------------------|"
- - "L0.2197[85,100] 852ns |----------------------------------------L0.2197-----------------------------------------|"
- - "L0.2202[85,100] 853ns |----------------------------------------L0.2202-----------------------------------------|"
- - "L0.2207[85,100] 854ns |----------------------------------------L0.2207-----------------------------------------|"
- - "L0.2212[85,100] 855ns |----------------------------------------L0.2212-----------------------------------------|"
- - "L0.2217[85,100] 856ns |----------------------------------------L0.2217-----------------------------------------|"
- - "L0.2222[85,100] 857ns |----------------------------------------L0.2222-----------------------------------------|"
- - "L0.2227[85,100] 858ns |----------------------------------------L0.2227-----------------------------------------|"
- - "L0.2232[85,100] 859ns |----------------------------------------L0.2232-----------------------------------------|"
- - "L0.2237[85,100] 860ns |----------------------------------------L0.2237-----------------------------------------|"
- - "L0.2242[85,100] 861ns |----------------------------------------L0.2242-----------------------------------------|"
- - "L0.2247[85,100] 862ns |----------------------------------------L0.2247-----------------------------------------|"
- - "L0.2252[85,100] 863ns |----------------------------------------L0.2252-----------------------------------------|"
- - "L0.2257[85,100] 864ns |----------------------------------------L0.2257-----------------------------------------|"
- - "L0.2262[85,100] 865ns |----------------------------------------L0.2262-----------------------------------------|"
- - "L0.2267[85,100] 866ns |----------------------------------------L0.2267-----------------------------------------|"
- - "L0.2272[85,100] 867ns |----------------------------------------L0.2272-----------------------------------------|"
- - "L0.2277[85,100] 868ns |----------------------------------------L0.2277-----------------------------------------|"
- - "L0.2282[85,100] 869ns |----------------------------------------L0.2282-----------------------------------------|"
- - "L0.2287[85,100] 870ns |----------------------------------------L0.2287-----------------------------------------|"
- - "L0.2292[85,100] 871ns |----------------------------------------L0.2292-----------------------------------------|"
- - "L0.2297[85,100] 872ns |----------------------------------------L0.2297-----------------------------------------|"
- - "L0.2302[85,100] 873ns |----------------------------------------L0.2302-----------------------------------------|"
- - "L0.2307[85,100] 874ns |----------------------------------------L0.2307-----------------------------------------|"
- - "L0.2312[85,100] 875ns |----------------------------------------L0.2312-----------------------------------------|"
- - "L0.2317[85,100] 876ns |----------------------------------------L0.2317-----------------------------------------|"
- - "L0.2322[85,100] 877ns |----------------------------------------L0.2322-----------------------------------------|"
- - "L0.2327[85,100] 878ns |----------------------------------------L0.2327-----------------------------------------|"
- - "L0.2332[85,100] 879ns |----------------------------------------L0.2332-----------------------------------------|"
- - "L0.2337[85,100] 880ns |----------------------------------------L0.2337-----------------------------------------|"
- - "L0.2342[85,100] 881ns |----------------------------------------L0.2342-----------------------------------------|"
- - "L0.2347[85,100] 882ns |----------------------------------------L0.2347-----------------------------------------|"
- - "L0.2352[85,100] 883ns |----------------------------------------L0.2352-----------------------------------------|"
- - "L0.2357[85,100] 884ns |----------------------------------------L0.2357-----------------------------------------|"
- - "L0.2362[85,100] 885ns |----------------------------------------L0.2362-----------------------------------------|"
- - "L0.2367[85,100] 886ns |----------------------------------------L0.2367-----------------------------------------|"
- - "L0.2372[85,100] 887ns |----------------------------------------L0.2372-----------------------------------------|"
- - "L0.2377[85,100] 888ns |----------------------------------------L0.2377-----------------------------------------|"
- - "L0.2382[85,100] 889ns |----------------------------------------L0.2382-----------------------------------------|"
- - "L0.2387[85,100] 890ns |----------------------------------------L0.2387-----------------------------------------|"
- - "L0.2392[85,100] 891ns |----------------------------------------L0.2392-----------------------------------------|"
- - "L0.2397[85,100] 892ns |----------------------------------------L0.2397-----------------------------------------|"
- - "L0.2402[85,100] 893ns |----------------------------------------L0.2402-----------------------------------------|"
- - "L0.2407[85,100] 894ns |----------------------------------------L0.2407-----------------------------------------|"
- - "L0.2412[85,100] 895ns |----------------------------------------L0.2412-----------------------------------------|"
- - "L0.2417[85,100] 896ns |----------------------------------------L0.2417-----------------------------------------|"
- - "L0.2422[85,100] 897ns |----------------------------------------L0.2422-----------------------------------------|"
- - "L0.2427[85,100] 898ns |----------------------------------------L0.2427-----------------------------------------|"
- - "L0.2432[85,100] 899ns |----------------------------------------L0.2432-----------------------------------------|"
- - "L0.2437[85,100] 900ns |----------------------------------------L0.2437-----------------------------------------|"
- - "L0.2442[85,100] 901ns |----------------------------------------L0.2442-----------------------------------------|"
- - "L0.2447[85,100] 902ns |----------------------------------------L0.2447-----------------------------------------|"
- - "L0.2452[85,100] 903ns |----------------------------------------L0.2452-----------------------------------------|"
- - "L0.2497[85,100] 904ns |----------------------------------------L0.2497-----------------------------------------|"
- - "L0.2502[85,100] 905ns |----------------------------------------L0.2502-----------------------------------------|"
- - "L0.2457[85,100] 906ns |----------------------------------------L0.2457-----------------------------------------|"
- - "L0.2462[85,100] 907ns |----------------------------------------L0.2462-----------------------------------------|"
- - "L0.2467[85,100] 908ns |----------------------------------------L0.2467-----------------------------------------|"
- - "L0.2472[85,100] 909ns |----------------------------------------L0.2472-----------------------------------------|"
- - "L0.2477[85,100] 910ns |----------------------------------------L0.2477-----------------------------------------|"
- - "L0.2482[85,100] 911ns |----------------------------------------L0.2482-----------------------------------------|"
- - "L0.2487[85,100] 912ns |----------------------------------------L0.2487-----------------------------------------|"
- - "L0.2492[85,100] 913ns |----------------------------------------L0.2492-----------------------------------------|"
- - "L0.2507[85,100] 914ns |----------------------------------------L0.2507-----------------------------------------|"
- - "L0.2512[85,100] 915ns |----------------------------------------L0.2512-----------------------------------------|"
- - "L0.2517[85,100] 916ns |----------------------------------------L0.2517-----------------------------------------|"
- - "L0.2522[85,100] 917ns |----------------------------------------L0.2522-----------------------------------------|"
- - "L0.2527[85,100] 918ns |----------------------------------------L0.2527-----------------------------------------|"
- - "L0.2532[85,100] 919ns |----------------------------------------L0.2532-----------------------------------------|"
- - "L0.2537[85,100] 920ns |----------------------------------------L0.2537-----------------------------------------|"
- - "L0.2542[85,100] 921ns |----------------------------------------L0.2542-----------------------------------------|"
- - "L0.2547[85,100] 922ns |----------------------------------------L0.2547-----------------------------------------|"
- - "L0.2552[85,100] 923ns |----------------------------------------L0.2552-----------------------------------------|"
- - "L0.2557[85,100] 924ns |----------------------------------------L0.2557-----------------------------------------|"
- - "L0.2562[85,100] 925ns |----------------------------------------L0.2562-----------------------------------------|"
- - "L0.2567[85,100] 926ns |----------------------------------------L0.2567-----------------------------------------|"
- - "L0.2572[85,100] 927ns |----------------------------------------L0.2572-----------------------------------------|"
- - "L0.2577[85,100] 928ns |----------------------------------------L0.2577-----------------------------------------|"
- - "L0.2582[85,100] 929ns |----------------------------------------L0.2582-----------------------------------------|"
- - "L0.2587[85,100] 930ns |----------------------------------------L0.2587-----------------------------------------|"
- - "L0.2592[85,100] 931ns |----------------------------------------L0.2592-----------------------------------------|"
- - "L0.2597[85,100] 932ns |----------------------------------------L0.2597-----------------------------------------|"
- - "L0.2602[85,100] 933ns |----------------------------------------L0.2602-----------------------------------------|"
- - "L0.2607[85,100] 934ns |----------------------------------------L0.2607-----------------------------------------|"
- - "L0.2612[85,100] 935ns |----------------------------------------L0.2612-----------------------------------------|"
- - "L0.2617[85,100] 936ns |----------------------------------------L0.2617-----------------------------------------|"
- - "L0.2622[85,100] 937ns |----------------------------------------L0.2622-----------------------------------------|"
- - "L0.2627[85,100] 938ns |----------------------------------------L0.2627-----------------------------------------|"
- - "L0.2632[85,100] 939ns |----------------------------------------L0.2632-----------------------------------------|"
- - "L0.2637[85,100] 940ns |----------------------------------------L0.2637-----------------------------------------|"
- - "L0.2642[85,100] 941ns |----------------------------------------L0.2642-----------------------------------------|"
- - "L0.2647[85,100] 942ns |----------------------------------------L0.2647-----------------------------------------|"
- - "L0.2652[85,100] 943ns |----------------------------------------L0.2652-----------------------------------------|"
- - "L0.2657[85,100] 944ns |----------------------------------------L0.2657-----------------------------------------|"
- - "L0.2662[85,100] 945ns |----------------------------------------L0.2662-----------------------------------------|"
- - "L0.2667[85,100] 946ns |----------------------------------------L0.2667-----------------------------------------|"
- - "L0.2672[85,100] 947ns |----------------------------------------L0.2672-----------------------------------------|"
- - "L0.2677[85,100] 948ns |----------------------------------------L0.2677-----------------------------------------|"
- - "L0.2682[85,100] 949ns |----------------------------------------L0.2682-----------------------------------------|"
- - "L0.2687[85,100] 950ns |----------------------------------------L0.2687-----------------------------------------|"
- - "L0.2692[85,100] 951ns |----------------------------------------L0.2692-----------------------------------------|"
- - "L0.2697[85,100] 952ns |----------------------------------------L0.2697-----------------------------------------|"
- - "L0.2702[85,100] 953ns |----------------------------------------L0.2702-----------------------------------------|"
- - "L0.2707[85,100] 954ns |----------------------------------------L0.2707-----------------------------------------|"
- - "L0.2712[85,100] 955ns |----------------------------------------L0.2712-----------------------------------------|"
- - "L0.2717[85,100] 956ns |----------------------------------------L0.2717-----------------------------------------|"
- - "L0.2722[85,100] 957ns |----------------------------------------L0.2722-----------------------------------------|"
- - "L0.2727[85,100] 958ns |----------------------------------------L0.2727-----------------------------------------|"
- - "L0.2732[85,100] 959ns |----------------------------------------L0.2732-----------------------------------------|"
- - "L0.2737[85,100] 960ns |----------------------------------------L0.2737-----------------------------------------|"
- - "L0.2742[85,100] 961ns |----------------------------------------L0.2742-----------------------------------------|"
- - "L0.2747[85,100] 962ns |----------------------------------------L0.2747-----------------------------------------|"
- - "L0.2752[85,100] 963ns |----------------------------------------L0.2752-----------------------------------------|"
- - "L0.2757[85,100] 964ns |----------------------------------------L0.2757-----------------------------------------|"
- - "L0.2762[85,100] 965ns |----------------------------------------L0.2762-----------------------------------------|"
- - "L0.2767[85,100] 966ns |----------------------------------------L0.2767-----------------------------------------|"
- - "L0.2772[85,100] 967ns |----------------------------------------L0.2772-----------------------------------------|"
- - "L0.2777[85,100] 968ns |----------------------------------------L0.2777-----------------------------------------|"
- - "L0.2782[85,100] 969ns |----------------------------------------L0.2782-----------------------------------------|"
- - "L0.2787[85,100] 970ns |----------------------------------------L0.2787-----------------------------------------|"
- - "L0.2792[85,100] 971ns |----------------------------------------L0.2792-----------------------------------------|"
- - "L0.2797[85,100] 972ns |----------------------------------------L0.2797-----------------------------------------|"
- - "L0.2802[85,100] 973ns |----------------------------------------L0.2802-----------------------------------------|"
- - "L0.2807[85,100] 974ns |----------------------------------------L0.2807-----------------------------------------|"
- - "L0.2812[85,100] 975ns |----------------------------------------L0.2812-----------------------------------------|"
- - "L0.2817[85,100] 976ns |----------------------------------------L0.2817-----------------------------------------|"
- - "L0.2822[85,100] 977ns |----------------------------------------L0.2822-----------------------------------------|"
- - "L0.2827[85,100] 978ns |----------------------------------------L0.2827-----------------------------------------|"
- - "L0.2832[85,100] 979ns |----------------------------------------L0.2832-----------------------------------------|"
- - "L0.2837[85,100] 980ns |----------------------------------------L0.2837-----------------------------------------|"
- - "L0.2842[85,100] 981ns |----------------------------------------L0.2842-----------------------------------------|"
- - "L0.2847[85,100] 982ns |----------------------------------------L0.2847-----------------------------------------|"
- - "L0.2852[85,100] 983ns |----------------------------------------L0.2852-----------------------------------------|"
- - "L0.2857[85,100] 984ns |----------------------------------------L0.2857-----------------------------------------|"
- - "L0.2862[85,100] 985ns |----------------------------------------L0.2862-----------------------------------------|"
- - "L0.2867[85,100] 986ns |----------------------------------------L0.2867-----------------------------------------|"
- - "L0.2872[85,100] 987ns |----------------------------------------L0.2872-----------------------------------------|"
- - "L0.2877[85,100] 988ns |----------------------------------------L0.2877-----------------------------------------|"
- - "L0.2882[85,100] 989ns |----------------------------------------L0.2882-----------------------------------------|"
- - "L0.2887[85,100] 990ns |----------------------------------------L0.2887-----------------------------------------|"
- - "L0.2892[85,100] 991ns |----------------------------------------L0.2892-----------------------------------------|"
- - "L0.2897[85,100] 992ns |----------------------------------------L0.2897-----------------------------------------|"
- - "L0.2902[85,100] 993ns |----------------------------------------L0.2902-----------------------------------------|"
- - "L0.2907[85,100] 994ns |----------------------------------------L0.2907-----------------------------------------|"
- - "L0.2912[85,100] 995ns |----------------------------------------L0.2912-----------------------------------------|"
- - "L0.2917[85,100] 996ns |----------------------------------------L0.2917-----------------------------------------|"
- - "L0.2922[85,100] 997ns |----------------------------------------L0.2922-----------------------------------------|"
- - "L0.2927[85,100] 998ns |----------------------------------------L0.2927-----------------------------------------|"
- - "L0.2932[85,100] 999ns |----------------------------------------L0.2932-----------------------------------------|"
- - "L0.2937[85,100] 1us |----------------------------------------L0.2937-----------------------------------------|"
- - "L0.2942[85,100] 1us |----------------------------------------L0.2942-----------------------------------------|"
- - "L0.2947[85,100] 1us |----------------------------------------L0.2947-----------------------------------------|"
- - "L0.2952[85,100] 1us |----------------------------------------L0.2952-----------------------------------------|"
- - "L0.2957[85,100] 1us |----------------------------------------L0.2957-----------------------------------------|"
- - "L0.2962[85,100] 1us |----------------------------------------L0.2962-----------------------------------------|"
- - "L0.2967[85,100] 1.01us |----------------------------------------L0.2967-----------------------------------------|"
- - "L0.2972[85,100] 1.01us |----------------------------------------L0.2972-----------------------------------------|"
- - "L0.2977[85,100] 1.01us |----------------------------------------L0.2977-----------------------------------------|"
- - "L0.2982[85,100] 1.01us |----------------------------------------L0.2982-----------------------------------------|"
- - "L0.2987[85,100] 1.01us |----------------------------------------L0.2987-----------------------------------------|"
- - "L0.2992[85,100] 1.01us |----------------------------------------L0.2992-----------------------------------------|"
- - "L0.2997[85,100] 1.01us |----------------------------------------L0.2997-----------------------------------------|"
- - "L0.3002[85,100] 1.01us |----------------------------------------L0.3002-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 38mb total:"
- - "L0 "
- - "L0.?[85,97] 1.01us 30mb |---------------------------------L0.?---------------------------------| "
- - "L0.?[98,100] 1.01us 8mb |---L0.?---|"
- - "Committing partition 1:"
- - " Soft Deleting 200 files: L0.2007, L0.2012, L0.2017, L0.2022, L0.2027, L0.2032, L0.2037, L0.2042, L0.2047, L0.2052, L0.2057, L0.2062, L0.2067, L0.2072, L0.2077, L0.2082, L0.2087, L0.2092, L0.2097, L0.2102, L0.2107, L0.2112, L0.2117, L0.2122, L0.2127, L0.2132, L0.2137, L0.2142, L0.2147, L0.2152, L0.2157, L0.2162, L0.2167, L0.2172, L0.2177, L0.2182, L0.2187, L0.2192, L0.2197, L0.2202, L0.2207, L0.2212, L0.2217, L0.2222, L0.2227, L0.2232, L0.2237, L0.2242, L0.2247, L0.2252, L0.2257, L0.2262, L0.2267, L0.2272, L0.2277, L0.2282, L0.2287, L0.2292, L0.2297, L0.2302, L0.2307, L0.2312, L0.2317, L0.2322, L0.2327, L0.2332, L0.2337, L0.2342, L0.2347, L0.2352, L0.2357, L0.2362, L0.2367, L0.2372, L0.2377, L0.2382, L0.2387, L0.2392, L0.2397, L0.2402, L0.2407, L0.2412, L0.2417, L0.2422, L0.2427, L0.2432, L0.2437, L0.2442, L0.2447, L0.2452, L0.2457, L0.2462, L0.2467, L0.2472, L0.2477, L0.2482, L0.2487, L0.2492, L0.2497, L0.2502, L0.2507, L0.2512, L0.2517, L0.2522, L0.2527, L0.2532, L0.2537, L0.2542, L0.2547, L0.2552, L0.2557, L0.2562, L0.2567, L0.2572, L0.2577, L0.2582, L0.2587, L0.2592, L0.2597, L0.2602, L0.2607, L0.2612, L0.2617, L0.2622, L0.2627, L0.2632, L0.2637, L0.2642, L0.2647, L0.2652, L0.2657, L0.2662, L0.2667, L0.2672, L0.2677, L0.2682, L0.2687, L0.2692, L0.2697, L0.2702, L0.2707, L0.2712, L0.2717, L0.2722, L0.2727, L0.2732, L0.2737, L0.2742, L0.2747, L0.2752, L0.2757, L0.2762, L0.2767, L0.2772, L0.2777, L0.2782, L0.2787, L0.2792, L0.2797, L0.2802, L0.2807, L0.2812, L0.2817, L0.2822, L0.2827, L0.2832, L0.2837, L0.2842, L0.2847, L0.2852, L0.2857, L0.2862, L0.2867, L0.2872, L0.2877, L0.2882, L0.2887, L0.2892, L0.2897, L0.2902, L0.2907, L0.2912, L0.2917, L0.2922, L0.2927, L0.2932, L0.2937, L0.2942, L0.2947, L0.2952, L0.2957, L0.2962, L0.2967, L0.2972, L0.2977, L0.2982, L0.2987, L0.2992, L0.2997, L0.3002"
- - " Creating 2 files"
- - "**** Simulation run 424, type=compact(ManySmallFiles). 7 Input Files, 1mb total:"
- - "L0, all files 192kb "
- - "L0.3007[85,100] 1.01us |----------------------------------------L0.3007-----------------------------------------|"
- - "L0.3012[85,100] 1.01us |----------------------------------------L0.3012-----------------------------------------|"
- - "L0.3017[85,100] 1.02us |----------------------------------------L0.3017-----------------------------------------|"
- - "L0.3022[85,100] 1.02us |----------------------------------------L0.3022-----------------------------------------|"
- - "L0.3027[85,100] 1.02us |----------------------------------------L0.3027-----------------------------------------|"
- - "L0.3032[85,100] 1.02us |----------------------------------------L0.3032-----------------------------------------|"
- - "L0.3037[85,100] 1.02us |----------------------------------------L0.3037-----------------------------------------|"
- - "**** 1 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L0, all files 1mb "
- - "L0.?[85,100] 1.02us |------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 7 files: L0.3007, L0.3012, L0.3017, L0.3022, L0.3027, L0.3032, L0.3037"
- - " Creating 1 files"
- - "**** Simulation run 425, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[45, 66]). 3 Input Files, 297mb total:"
- - "L0 "
- - "L0.3049[55,71] 713ns 106mb |-------L0.3049--------| "
- - "L0.3050[72,84] 713ns 86mb |----L0.3050-----|"
- - "L0.3058[24,40] 714ns 106mb|-------L0.3058--------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 297mb total:"
- - "L1 "
- - "L1.?[24,45] 714ns 104mb |------------L1.?-------------| "
- - "L1.?[46,66] 714ns 99mb |------------L1.?------------| "
- - "L1.?[67,84] 714ns 94mb |---------L1.?----------| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L0.3049, L0.3050, L0.3058"
- - " Creating 3 files"
- - "**** Simulation run 426, type=split(HighL0OverlapTotalBacklog)(split_times=[44]). 1 Input Files, 104mb total:"
- - "L1, all files 104mb "
- - "L1.3067[24,45] 714ns |----------------------------------------L1.3067-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 104mb total:"
- - "L1 "
- - "L1.?[24,44] 714ns 99mb |---------------------------------------L1.?----------------------------------------| "
- - "L1.?[45,45] 714ns 5mb |L1.?|"
- - "**** Simulation run 427, type=split(HighL0OverlapTotalBacklog)(split_times=[44]). 1 Input Files, 23mb total:"
- - "L0, all files 23mb "
- - "L0.3060[24,48] 814ns |----------------------------------------L0.3060-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 23mb total:"
- - "L0 "
- - "L0.?[24,44] 814ns 19mb |----------------------------------L0.?-----------------------------------| "
- - "L0.?[45,48] 814ns 4mb |--L0.?---| "
- - "**** Simulation run 428, type=split(HighL0OverlapTotalBacklog)(split_times=[44]). 1 Input Files, 23mb total:"
- - "L0, all files 23mb "
- - "L0.3044[24,48] 914ns |----------------------------------------L0.3044-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 23mb total:"
- - "L0 "
- - "L0.?[24,44] 914ns 19mb |----------------------------------L0.?-----------------------------------| "
- - "L0.?[45,48] 914ns 4mb |--L0.?---| "
- - "**** Simulation run 429, type=split(HighL0OverlapTotalBacklog)(split_times=[44]). 1 Input Files, 23mb total:"
- - "L0, all files 23mb "
- - "L0.3046[24,48] 1.01us |----------------------------------------L0.3046-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 23mb total:"
- - "L0 "
- - "L0.?[24,44] 1.01us 19mb |----------------------------------L0.?-----------------------------------| "
- - "L0.?[45,48] 1.01us 4mb |--L0.?---| "
- - "**** Simulation run 430, type=split(HighL0OverlapTotalBacklog)(split_times=[44]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.3048[24,54] 1.02us |----------------------------------------L0.3048-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[24,44] 1.02us 1mb |---------------------------L0.?---------------------------| "
- - "L0.?[45,54] 1.02us 633kb |----------L0.?-----------|"
- - "**** Simulation run 431, type=split(HighL0OverlapTotalBacklog)(split_times=[44]). 1 Input Files, 93mb total:"
- - "L0, all files 93mb "
- - "L0.3059[41,54] 714ns |----------------------------------------L0.3059-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 93mb total:"
- - "L0 "
- - "L0.?[41,44] 714ns 21mb |-------L0.?-------| "
- - "L0.?[45,54] 714ns 71mb |----------------------------L0.?----------------------------| "
- - "**** Simulation run 432, type=split(HighL0OverlapTotalBacklog)(split_times=[64]). 1 Input Files, 99mb total:"
- - "L1, all files 99mb "
- - "L1.3068[46,66] 714ns |----------------------------------------L1.3068-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 99mb total:"
- - "L1 "
- - "L1.?[46,64] 714ns 89mb |-------------------------------------L1.?--------------------------------------| "
- - "L1.?[65,66] 714ns 10mb |L1.?|"
- - "**** Simulation run 433, type=split(HighL0OverlapTotalBacklog)(split_times=[64]). 1 Input Files, 22mb total:"
- - "L0, all files 22mb "
- - "L0.3051[55,78] 813ns |----------------------------------------L0.3051-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 22mb total:"
- - "L0 "
- - "L0.?[55,64] 813ns 9mb |--------------L0.?---------------| "
- - "L0.?[65,78] 813ns 13mb |----------------------L0.?----------------------| "
- - "**** Simulation run 434, type=split(HighL0OverlapTotalBacklog)(split_times=[64]). 1 Input Files, 22mb total:"
- - "L0, all files 22mb "
- - "L0.3053[55,78] 913ns |----------------------------------------L0.3053-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 22mb total:"
- - "L0 "
- - "L0.?[55,64] 913ns 9mb |--------------L0.?---------------| "
- - "L0.?[65,78] 913ns 13mb |----------------------L0.?----------------------| "
- - "**** Simulation run 435, type=split(HighL0OverlapTotalBacklog)(split_times=[64]). 1 Input Files, 22mb total:"
- - "L0, all files 22mb "
- - "L0.3055[55,78] 1.01us |----------------------------------------L0.3055-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 22mb total:"
- - "L0 "
- - "L0.?[55,64] 1.01us 9mb |--------------L0.?---------------| "
- - "L0.?[65,78] 1.01us 13mb |----------------------L0.?----------------------| "
- - "**** Simulation run 436, type=split(HighL0OverlapTotalBacklog)(split_times=[64]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.3057[55,84] 1.02us |----------------------------------------L0.3057-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[55,64] 1.02us 615kb |----------L0.?-----------| "
- - "L0.?[65,84] 1.02us 1mb |--------------------------L0.?--------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 11 files: L0.3044, L0.3046, L0.3048, L0.3051, L0.3053, L0.3055, L0.3057, L0.3059, L0.3060, L1.3067, L1.3068"
- - " Creating 22 files"
- - "**** Simulation run 437, type=split(ReduceOverlap)(split_times=[45]). 1 Input Files, 71mb total:"
- - "L0, all files 71mb "
- - "L0.3081[45,54] 714ns |----------------------------------------L0.3081-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 71mb total:"
- - "L0 "
- - "L0.?[45,45] 714ns 0b |L0.?| "
- - "L0.?[46,54] 714ns 71mb |-------------------------------------L0.?-------------------------------------|"
- - "**** Simulation run 438, type=split(ReduceOverlap)(split_times=[66]). 1 Input Files, 13mb total:"
- - "L0, all files 13mb "
- - "L0.3085[65,78] 813ns |----------------------------------------L0.3085-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 13mb total:"
- - "L0 "
- - "L0.?[65,66] 813ns 1mb |L0.?| "
- - "L0.?[67,78] 813ns 12mb |-----------------------------------L0.?-----------------------------------| "
- - "**** Simulation run 439, type=split(ReduceOverlap)(split_times=[45]). 1 Input Files, 4mb total:"
- - "L0, all files 4mb "
- - "L0.3073[45,48] 814ns |----------------------------------------L0.3073-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 4mb total:"
- - "L0 "
- - "L0.?[45,45] 814ns 0b |L0.?| "
- - "L0.?[46,48] 814ns 4mb |---------------------------L0.?---------------------------|"
- - "**** Simulation run 440, type=split(ReduceOverlap)(split_times=[66]). 1 Input Files, 13mb total:"
- - "L0, all files 13mb "
- - "L0.3087[65,78] 913ns |----------------------------------------L0.3087-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 13mb total:"
- - "L0 "
- - "L0.?[65,66] 913ns 1mb |L0.?| "
- - "L0.?[67,78] 913ns 12mb |-----------------------------------L0.?-----------------------------------| "
- - "**** Simulation run 441, type=split(ReduceOverlap)(split_times=[45]). 1 Input Files, 4mb total:"
- - "L0, all files 4mb "
- - "L0.3075[45,48] 914ns |----------------------------------------L0.3075-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 4mb total:"
- - "L0 "
- - "L0.?[45,45] 914ns 0b |L0.?| "
- - "L0.?[46,48] 914ns 4mb |---------------------------L0.?---------------------------|"
- - "**** Simulation run 442, type=split(ReduceOverlap)(split_times=[66]). 1 Input Files, 13mb total:"
- - "L0, all files 13mb "
- - "L0.3089[65,78] 1.01us |----------------------------------------L0.3089-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 13mb total:"
- - "L0 "
- - "L0.?[65,66] 1.01us 1mb |L0.?| "
- - "L0.?[67,78] 1.01us 12mb |-----------------------------------L0.?-----------------------------------| "
- - "**** Simulation run 443, type=split(ReduceOverlap)(split_times=[45]). 1 Input Files, 4mb total:"
- - "L0, all files 4mb "
- - "L0.3077[45,48] 1.01us |----------------------------------------L0.3077-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 4mb total:"
- - "L0 "
- - "L0.?[45,45] 1.01us 0b |L0.?| "
- - "L0.?[46,48] 1.01us 4mb |---------------------------L0.?---------------------------|"
- - "**** Simulation run 444, type=split(ReduceOverlap)(split_times=[45]). 1 Input Files, 633kb total:"
- - "L0, all files 633kb "
- - "L0.3079[45,54] 1.02us |----------------------------------------L0.3079-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 633kb total:"
- - "L0 "
- - "L0.?[45,45] 1.02us 0b |L0.?| "
- - "L0.?[46,54] 1.02us 633kb |-------------------------------------L0.?-------------------------------------|"
- - "**** Simulation run 445, type=split(ReduceOverlap)(split_times=[66]). 1 Input Files, 1mb total:"
- - "L0, all files 1mb "
- - "L0.3091[65,84] 1.02us |----------------------------------------L0.3091-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L0 "
- - "L0.?[65,66] 1.02us 72kb |L0.?| "
- - "L0.?[67,84] 1.02us 1mb |-------------------------------------L0.?-------------------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 9 files: L0.3073, L0.3075, L0.3077, L0.3079, L0.3081, L0.3085, L0.3087, L0.3089, L0.3091"
- - " Creating 18 files"
- - "**** Simulation run 446, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[38, 52]). 7 Input Files, 294mb total:"
- - "L0 "
- - "L0.3080[41,44] 714ns 21mb |L0.3080| "
- - "L0.3092[45,45] 714ns 0b |L0.3092| "
- - "L0.3093[46,54] 714ns 71mb |----L0.3093-----| "
- - "L0.3084[55,64] 813ns 9mb |-----L0.3084------| "
- - "L1 "
- - "L1.3070[24,44] 714ns 99mb|------------------L1.3070------------------| "
- - "L1.3071[45,45] 714ns 5mb |L1.3071| "
- - "L1.3082[46,64] 714ns 89mb |---------------L1.3082----------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 294mb total:"
- - "L1 "
- - "L1.?[24,38] 813ns 103mb |------------L1.?-------------| "
- - "L1.?[39,52] 813ns 96mb |-----------L1.?------------| "
- - "L1.?[53,64] 813ns 96mb |---------L1.?---------| "
- - "Committing partition 1:"
- - " Soft Deleting 7 files: L1.3070, L1.3071, L0.3080, L1.3082, L0.3084, L0.3092, L0.3093"
- - " Creating 3 files"
- - "**** Simulation run 447, type=split(HighL0OverlapTotalBacklog)(split_times=[37]). 1 Input Files, 103mb total:"
- - "L1, all files 103mb "
- - "L1.3110[24,38] 813ns |----------------------------------------L1.3110-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 103mb total:"
- - "L1 "
- - "L1.?[24,37] 813ns 96mb |--------------------------------------L1.?---------------------------------------| "
- - "L1.?[38,38] 813ns 7mb |L1.?|"
- - "**** Simulation run 448, type=split(HighL0OverlapTotalBacklog)(split_times=[37]). 1 Input Files, 19mb total:"
- - "L0, all files 19mb "
- - "L0.3072[24,44] 814ns |----------------------------------------L0.3072-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 19mb total:"
- - "L0 "
- - "L0.?[24,37] 814ns 12mb |--------------------------L0.?--------------------------| "
- - "L0.?[38,44] 814ns 7mb |----------L0.?-----------|"
- - "**** Simulation run 449, type=split(HighL0OverlapTotalBacklog)(split_times=[37]). 1 Input Files, 19mb total:"
- - "L0, all files 19mb "
- - "L0.3074[24,44] 914ns |----------------------------------------L0.3074-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 19mb total:"
- - "L0 "
- - "L0.?[24,37] 914ns 12mb |--------------------------L0.?--------------------------| "
- - "L0.?[38,44] 914ns 7mb |----------L0.?-----------|"
- - "**** Simulation run 450, type=split(HighL0OverlapTotalBacklog)(split_times=[37]). 1 Input Files, 19mb total:"
- - "L0, all files 19mb "
- - "L0.3076[24,44] 1.01us |----------------------------------------L0.3076-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 19mb total:"
- - "L0 "
- - "L0.?[24,37] 1.01us 12mb |--------------------------L0.?--------------------------| "
- - "L0.?[38,44] 1.01us 7mb |----------L0.?-----------|"
- - "**** Simulation run 451, type=split(HighL0OverlapTotalBacklog)(split_times=[37]). 1 Input Files, 1mb total:"
- - "L0, all files 1mb "
- - "L0.3078[24,44] 1.02us |----------------------------------------L0.3078-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L0 "
- - "L0.?[24,37] 1.02us 823kb |--------------------------L0.?--------------------------| "
- - "L0.?[38,44] 1.02us 443kb |----------L0.?-----------|"
- - "**** Simulation run 452, type=split(HighL0OverlapTotalBacklog)(split_times=[50]). 1 Input Files, 96mb total:"
- - "L1, all files 96mb "
- - "L1.3111[39,52] 813ns |----------------------------------------L1.3111-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 96mb total:"
- - "L1 "
- - "L1.?[39,50] 813ns 81mb |-----------------------------------L1.?-----------------------------------| "
- - "L1.?[51,52] 813ns 15mb |L1.?| "
- - "**** Simulation run 453, type=split(HighL0OverlapTotalBacklog)(split_times=[50]). 1 Input Files, 633kb total:"
- - "L0, all files 633kb "
- - "L0.3107[46,54] 1.02us |----------------------------------------L0.3107-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 633kb total:"
- - "L0, all files 317kb "
- - "L0.?[46,50] 1.02us |-------------------L0.?--------------------| "
- - "L0.?[51,54] 1.02us |-------------L0.?--------------| "
- - "**** Simulation run 454, type=split(HighL0OverlapTotalBacklog)(split_times=[50]). 1 Input Files, 6mb total:"
- - "L0, all files 6mb "
- - "L0.3061[49,54] 814ns |----------------------------------------L0.3061-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0 "
- - "L0.?[49,50] 814ns 1mb |------L0.?------| "
- - "L0.?[51,54] 814ns 5mb |------------------------L0.?------------------------|"
- - "**** Simulation run 455, type=split(HighL0OverlapTotalBacklog)(split_times=[50]). 1 Input Files, 6mb total:"
- - "L0, all files 6mb "
- - "L0.3045[49,54] 914ns |----------------------------------------L0.3045-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0 "
- - "L0.?[49,50] 914ns 1mb |------L0.?------| "
- - "L0.?[51,54] 914ns 5mb |------------------------L0.?------------------------|"
- - "**** Simulation run 456, type=split(HighL0OverlapTotalBacklog)(split_times=[50]). 1 Input Files, 6mb total:"
- - "L0, all files 6mb "
- - "L0.3047[49,54] 1.01us |----------------------------------------L0.3047-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0 "
- - "L0.?[49,50] 1.01us 1mb |------L0.?------| "
- - "L0.?[51,54] 1.01us 5mb |------------------------L0.?------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 10 files: L0.3045, L0.3047, L0.3061, L0.3072, L0.3074, L0.3076, L0.3078, L0.3107, L1.3110, L1.3111"
- - " Creating 20 files"
- - "**** Simulation run 457, type=split(ReduceOverlap)(split_times=[38]). 1 Input Files, 7mb total:"
- - "L0, all files 7mb "
- - "L0.3116[38,44] 814ns |----------------------------------------L0.3116-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 7mb total:"
- - "L0 "
- - "L0.?[38,38] 814ns 0b |L0.?| "
- - "L0.?[39,44] 814ns 7mb |----------------------------------L0.?-----------------------------------|"
- - "**** Simulation run 458, type=split(ReduceOverlap)(split_times=[52]). 1 Input Files, 5mb total:"
- - "L0, all files 5mb "
- - "L0.3128[51,54] 814ns |----------------------------------------L0.3128-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 5mb total:"
- - "L0 "
- - "L0.?[51,52] 814ns 2mb |------------L0.?------------| "
- - "L0.?[53,54] 814ns 3mb |------------L0.?------------|"
- - "**** Simulation run 459, type=split(ReduceOverlap)(split_times=[38]). 1 Input Files, 7mb total:"
- - "L0, all files 7mb "
- - "L0.3118[38,44] 914ns |----------------------------------------L0.3118-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 7mb total:"
- - "L0 "
- - "L0.?[38,38] 914ns 0b |L0.?| "
- - "L0.?[39,44] 914ns 7mb |----------------------------------L0.?-----------------------------------|"
- - "**** Simulation run 460, type=split(ReduceOverlap)(split_times=[52]). 1 Input Files, 5mb total:"
- - "L0, all files 5mb "
- - "L0.3130[51,54] 914ns |----------------------------------------L0.3130-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 5mb total:"
- - "L0 "
- - "L0.?[51,52] 914ns 2mb |------------L0.?------------| "
- - "L0.?[53,54] 914ns 3mb |------------L0.?------------|"
- - "**** Simulation run 461, type=split(ReduceOverlap)(split_times=[38]). 1 Input Files, 7mb total:"
- - "L0, all files 7mb "
- - "L0.3120[38,44] 1.01us |----------------------------------------L0.3120-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 7mb total:"
- - "L0 "
- - "L0.?[38,38] 1.01us 0b |L0.?| "
- - "L0.?[39,44] 1.01us 7mb |----------------------------------L0.?-----------------------------------|"
- - "**** Simulation run 462, type=split(ReduceOverlap)(split_times=[52]). 1 Input Files, 5mb total:"
- - "L0, all files 5mb "
- - "L0.3132[51,54] 1.01us |----------------------------------------L0.3132-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 5mb total:"
- - "L0 "
- - "L0.?[51,52] 1.01us 2mb |------------L0.?------------| "
- - "L0.?[53,54] 1.01us 3mb |------------L0.?------------|"
- - "**** Simulation run 463, type=split(ReduceOverlap)(split_times=[38]). 1 Input Files, 443kb total:"
- - "L0, all files 443kb "
- - "L0.3122[38,44] 1.02us |----------------------------------------L0.3122-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 443kb total:"
- - "L0 "
- - "L0.?[38,38] 1.02us 0b |L0.?| "
- - "L0.?[39,44] 1.02us 443kb |----------------------------------L0.?-----------------------------------|"
- - "**** Simulation run 464, type=split(ReduceOverlap)(split_times=[52]). 1 Input Files, 317kb total:"
- - "L0, all files 317kb "
- - "L0.3126[51,54] 1.02us |----------------------------------------L0.3126-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 317kb total:"
- - "L0 "
- - "L0.?[51,52] 1.02us 106kb |------------L0.?------------| "
- - "L0.?[53,54] 1.02us 211kb |------------L0.?------------|"
- - "Committing partition 1:"
- - " Soft Deleting 8 files: L0.3116, L0.3118, L0.3120, L0.3122, L0.3126, L0.3128, L0.3130, L0.3132"
- - " Creating 16 files"
- - "**** Simulation run 465, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[78, 91]). 7 Input Files, 272mb total:"
- - "L0 "
- - "L0.3094[65,66] 813ns 1mb |L0.3094| "
- - "L0.3095[67,78] 813ns 12mb |---------L0.3095----------| "
- - "L0.3052[79,84] 813ns 6mb |-L0.3052--| "
- - "L0.3062[85,96] 813ns 109mb |---------L0.3062----------| "
- - "L0.3063[97,100] 813ns 40mb |L0.3063|"
- - "L1 "
- - "L1.3083[65,66] 714ns 10mb|L1.3083| "
- - "L1.3069[67,84] 714ns 94mb |-----------------L1.3069-----------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 272mb total:"
- - "L1 "
- - "L1.?[65,78] 813ns 101mb |-------------L1.?--------------| "
- - "L1.?[79,91] 813ns 93mb |------------L1.?------------| "
- - "L1.?[92,100] 813ns 78mb |-------L1.?-------| "
- - "Committing partition 1:"
- - " Soft Deleting 7 files: L0.3052, L0.3062, L0.3063, L1.3069, L1.3083, L0.3094, L0.3095"
- - " Creating 3 files"
- - "**** Simulation run 466, type=split(HighL0OverlapTotalBacklog)(split_times=[76]). 1 Input Files, 101mb total:"
- - "L1, all files 101mb "
- - "L1.3149[65,78] 813ns |----------------------------------------L1.3149-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 101mb total:"
- - "L1 "
- - "L1.?[65,76] 813ns 85mb |-----------------------------------L1.?-----------------------------------| "
- - "L1.?[77,78] 813ns 16mb |L1.?| "
- - "**** Simulation run 467, type=split(HighL0OverlapTotalBacklog)(split_times=[76]). 1 Input Files, 12mb total:"
- - "L0, all files 12mb "
- - "L0.3099[67,78] 913ns |----------------------------------------L0.3099-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 12mb total:"
- - "L0 "
- - "L0.?[67,76] 913ns 10mb |---------------------------------L0.?----------------------------------| "
- - "L0.?[77,78] 913ns 2mb |-L0.?-| "
- - "**** Simulation run 468, type=split(HighL0OverlapTotalBacklog)(split_times=[76]). 1 Input Files, 12mb total:"
- - "L0, all files 12mb "
- - "L0.3103[67,78] 1.01us |----------------------------------------L0.3103-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 12mb total:"
- - "L0 "
- - "L0.?[67,76] 1.01us 10mb |---------------------------------L0.?----------------------------------| "
- - "L0.?[77,78] 1.01us 2mb |-L0.?-| "
- - "**** Simulation run 469, type=split(HighL0OverlapTotalBacklog)(split_times=[76]). 1 Input Files, 1mb total:"
- - "L0, all files 1mb "
- - "L0.3109[67,84] 1.02us |----------------------------------------L0.3109-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L0 "
- - "L0.?[67,76] 1.02us 685kb |--------------------L0.?---------------------| "
- - "L0.?[77,84] 1.02us 609kb |---------------L0.?----------------| "
- - "**** Simulation run 470, type=split(HighL0OverlapTotalBacklog)(split_times=[87]). 1 Input Files, 93mb total:"
- - "L1, all files 93mb "
- - "L1.3150[79,91] 813ns |----------------------------------------L1.3150-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 93mb total:"
- - "L1 "
- - "L1.?[79,87] 813ns 62mb |---------------------------L1.?---------------------------| "
- - "L1.?[88,91] 813ns 31mb |--------L1.?--------| "
- - "**** Simulation run 471, type=split(HighL0OverlapTotalBacklog)(split_times=[87]). 1 Input Files, 30mb total:"
- - "L0, all files 30mb "
- - "L0.3064[85,97] 1.01us |----------------------------------------L0.3064-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:"
- - "L0 "
- - "L0.?[85,87] 1.01us 5mb |----L0.?-----| "
- - "L0.?[88,97] 1.01us 25mb |------------------------------L0.?-------------------------------| "
- - "**** Simulation run 472, type=split(HighL0OverlapTotalBacklog)(split_times=[87]). 1 Input Files, 1mb total:"
- - "L0, all files 1mb "
- - "L0.3066[85,100] 1.02us |----------------------------------------L0.3066-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L0 "
- - "L0.?[85,87] 1.02us 179kb |---L0.?---| "
- - "L0.?[88,100] 1.02us 1mb |---------------------------------L0.?---------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 7 files: L0.3064, L0.3066, L0.3099, L0.3103, L0.3109, L1.3149, L1.3150"
- - " Creating 14 files"
- - "**** Simulation run 473, type=split(ReduceOverlap)(split_times=[91]). 1 Input Files, 25mb total:"
- - "L0, all files 25mb "
- - "L0.3163[88,97] 1.01us |----------------------------------------L0.3163-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 25mb total:"
- - "L0 "
- - "L0.?[88,91] 1.01us 8mb |------------L0.?------------| "
- - "L0.?[92,97] 1.01us 17mb |----------------------L0.?----------------------|"
- - "**** Simulation run 474, type=split(ReduceOverlap)(split_times=[78]). 1 Input Files, 609kb total:"
- - "L0, all files 609kb "
- - "L0.3159[77,84] 1.02us |----------------------------------------L0.3159-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 609kb total:"
- - "L0 "
- - "L0.?[77,78] 1.02us 87kb |---L0.?---| "
- - "L0.?[79,84] 1.02us 522kb |-----------------------------L0.?-----------------------------| "
- - "**** Simulation run 475, type=split(ReduceOverlap)(split_times=[91]). 1 Input Files, 1mb total:"
- - "L0, all files 1mb "
- - "L0.3165[88,100] 1.02us |----------------------------------------L0.3165-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L0 "
- - "L0.?[88,91] 1.02us 291kb |--------L0.?--------| "
- - "L0.?[92,100] 1.02us 874kb |---------------------------L0.?---------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L0.3159, L0.3163, L0.3165"
- - " Creating 6 files"
- - "**** Simulation run 476, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[37, 50]). 11 Input Files, 224mb total:"
- - "L0 "
- - "L0.3115[24,37] 814ns 12mb|----------------L0.3115----------------| "
- - "L0.3133[38,38] 814ns 0b |L0.3133| "
- - "L0.3134[39,44] 814ns 7mb |---L0.3134----| "
- - "L0.3096[45,45] 814ns 0b |L0.3096| "
- - "L0.3097[46,48] 814ns 4mb |L0.3097| "
- - "L0.3127[49,50] 814ns 1mb |L0.3127| "
- - "L0.3135[51,52] 814ns 2mb |L0.3135|"
- - "L1 "
- - "L1.3113[24,37] 813ns 96mb|----------------L1.3113----------------| "
- - "L1.3114[38,38] 813ns 7mb |L1.3114| "
- - "L1.3123[39,50] 813ns 81mb |-------------L1.3123-------------| "
- - "L1.3124[51,52] 813ns 15mb |L1.3124|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 224mb total:"
- - "L1 "
- - "L1.?[24,37] 814ns 104mb |-----------------L1.?------------------| "
- - "L1.?[38,50] 814ns 96mb |----------------L1.?----------------| "
- - "L1.?[51,52] 814ns 24mb |L1.?|"
- - "Committing partition 1:"
- - " Soft Deleting 11 files: L0.3096, L0.3097, L1.3113, L1.3114, L0.3115, L1.3123, L1.3124, L0.3127, L0.3133, L0.3134, L0.3135"
- - " Creating 3 files"
- - "**** Simulation run 477, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[65, 77]). 10 Input Files, 289mb total:"
- - "L0 "
- - "L0.3136[53,54] 814ns 3mb |L0.3136| "
- - "L0.3086[55,64] 913ns 9mb |-------L0.3086-------| "
- - "L0.3098[65,66] 913ns 1mb |L0.3098| "
- - "L0.3154[67,76] 913ns 10mb |-------L0.3154-------| "
- - "L0.3155[77,78] 913ns 2mb |L0.3155| "
- - "L0.3054[79,84] 913ns 6mb |--L0.3054--| "
- - "L1 "
- - "L1.3112[53,64] 813ns 96mb|----------L1.3112----------| "
- - "L1.3152[65,76] 813ns 85mb |----------L1.3152----------| "
- - "L1.3153[77,78] 813ns 16mb |L1.3153| "
- - "L1.3160[79,87] 813ns 62mb |------L1.3160------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 289mb total:"
- - "L1 "
- - "L1.?[53,65] 913ns 102mb |------------L1.?-------------| "
- - "L1.?[66,77] 913ns 94mb |-----------L1.?------------| "
- - "L1.?[78,87] 913ns 94mb |--------L1.?---------| "
- - "Committing partition 1:"
- - " Soft Deleting 10 files: L0.3054, L0.3086, L0.3098, L1.3112, L0.3136, L1.3152, L1.3153, L0.3154, L0.3155, L1.3160"
- - " Creating 3 files"
- - "**** Simulation run 478, type=split(HighL0OverlapTotalBacklog)(split_times=[64]). 1 Input Files, 102mb total:"
- - "L1, all files 102mb "
- - "L1.3175[53,65] 913ns |----------------------------------------L1.3175-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 102mb total:"
- - "L1 "
- - "L1.?[53,64] 913ns 94mb |--------------------------------------L1.?--------------------------------------| "
- - "L1.?[65,65] 913ns 9mb |L1.?|"
- - "**** Simulation run 479, type=split(HighL0OverlapTotalBacklog)(split_times=[75]). 1 Input Files, 94mb total:"
- - "L1, all files 94mb "
- - "L1.3176[66,77] 913ns |----------------------------------------L1.3176-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 94mb total:"
- - "L1 "
- - "L1.?[66,75] 913ns 77mb |---------------------------------L1.?----------------------------------| "
- - "L1.?[76,77] 913ns 17mb |-L1.?-| "
- - "**** Simulation run 480, type=split(HighL0OverlapTotalBacklog)(split_times=[75]). 1 Input Files, 10mb total:"
- - "L0, all files 10mb "
- - "L0.3156[67,76] 1.01us |----------------------------------------L0.3156-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:"
- - "L0 "
- - "L0.?[67,75] 1.01us 9mb |-------------------------------------L0.?-------------------------------------| "
- - "L0.?[76,76] 1.01us 1mb |L0.?|"
- - "**** Simulation run 481, type=split(HighL0OverlapTotalBacklog)(split_times=[75]). 1 Input Files, 685kb total:"
- - "L0, all files 685kb "
- - "L0.3158[67,76] 1.02us |----------------------------------------L0.3158-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 685kb total:"
- - "L0 "
- - "L0.?[67,75] 1.02us 609kb |-------------------------------------L0.?-------------------------------------| "
- - "L0.?[76,76] 1.02us 76kb |L0.?|"
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L0.3156, L0.3158, L1.3175, L1.3176"
- - " Creating 8 files"
- - "**** Simulation run 482, type=split(ReduceOverlap)(split_times=[65]). 1 Input Files, 1mb total:"
- - "L0, all files 1mb "
- - "L0.3102[65,66] 1.01us |----------------------------------------L0.3102-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L0 "
- - "L0.?[65,65] 1.01us 0b |L0.?| "
- - "L0.?[66,66] 1.01us 1mb |L0.?|"
- - "**** Simulation run 483, type=split(ReduceOverlap)(split_times=[77]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.3157[77,78] 1.01us |----------------------------------------L0.3157-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[77,77] 1.01us 0b |L0.?| "
- - "L0.?[78,78] 1.01us 2mb |L0.?|"
- - "**** Simulation run 484, type=split(ReduceOverlap)(split_times=[65]). 1 Input Files, 72kb total:"
- - "L0, all files 72kb "
- - "L0.3108[65,66] 1.02us |----------------------------------------L0.3108-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 72kb total:"
- - "L0 "
- - "L0.?[65,65] 1.02us 0b |L0.?| "
- - "L0.?[66,66] 1.02us 72kb |L0.?|"
- - "**** Simulation run 485, type=split(ReduceOverlap)(split_times=[77]). 1 Input Files, 87kb total:"
- - "L0, all files 87kb "
- - "L0.3168[77,78] 1.02us |----------------------------------------L0.3168-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 87kb total:"
- - "L0 "
- - "L0.?[77,77] 1.02us 0b |L0.?| "
- - "L0.?[78,78] 1.02us 87kb |L0.?|"
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L0.3102, L0.3108, L0.3157, L0.3168"
- - " Creating 8 files"
- - "**** Simulation run 486, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[36, 48]). 10 Input Files, 250mb total:"
- - "L0 "
- - "L0.3117[24,37] 914ns 12mb|----------------L0.3117----------------| "
- - "L0.3137[38,38] 914ns 0b |L0.3137| "
- - "L0.3138[39,44] 914ns 7mb |---L0.3138----| "
- - "L0.3100[45,45] 914ns 0b |L0.3100| "
- - "L0.3101[46,48] 914ns 4mb |L0.3101| "
- - "L0.3129[49,50] 914ns 1mb |L0.3129| "
- - "L0.3139[51,52] 914ns 2mb |L0.3139|"
- - "L1 "
- - "L1.3172[24,37] 814ns 104mb|----------------L1.3172----------------| "
- - "L1.3173[38,50] 814ns 96mb |--------------L1.3173---------------| "
- - "L1.3174[51,52] 814ns 24mb |L1.3174|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 250mb total:"
- - "L1 "
- - "L1.?[24,36] 914ns 107mb |----------------L1.?----------------| "
- - "L1.?[37,48] 914ns 98mb |--------------L1.?---------------| "
- - "L1.?[49,52] 914ns 45mb |-L1.?--| "
- - "Committing partition 1:"
- - " Soft Deleting 10 files: L0.3100, L0.3101, L0.3117, L0.3129, L0.3137, L0.3138, L0.3139, L1.3172, L1.3173, L1.3174"
- - " Creating 3 files"
- - "**** Simulation run 487, type=split(ReduceOverlap)(split_times=[36]). 1 Input Files, 12mb total:"
- - "L0, all files 12mb "
- - "L0.3119[24,37] 1.01us |----------------------------------------L0.3119-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 12mb total:"
- - "L0 "
- - "L0.?[24,36] 1.01us 11mb |--------------------------------------L0.?---------------------------------------| "
- - "L0.?[37,37] 1.01us 977kb |L0.?|"
- - "**** Simulation run 488, type=split(ReduceOverlap)(split_times=[36]). 1 Input Files, 823kb total:"
- - "L0, all files 823kb "
- - "L0.3121[24,37] 1.02us |----------------------------------------L0.3121-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 823kb total:"
- - "L0 "
- - "L0.?[24,36] 1.02us 760kb |--------------------------------------L0.?---------------------------------------| "
- - "L0.?[37,37] 1.02us 63kb |L0.?|"
- - "**** Simulation run 489, type=split(ReduceOverlap)(split_times=[48]). 1 Input Files, 317kb total:"
- - "L0, all files 317kb "
- - "L0.3125[46,50] 1.02us |----------------------------------------L0.3125-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 317kb total:"
- - "L0, all files 158kb "
- - "L0.?[46,48] 1.02us |-------------------L0.?--------------------| "
- - "L0.?[49,50] 1.02us |--------L0.?--------| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L0.3119, L0.3121, L0.3125"
- - " Creating 6 files"
- - "**** Simulation run 490, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[64, 75]). 11 Input Files, 219mb total:"
- - "L0 "
- - "L0.3140[53,54] 914ns 3mb |L0.3140| "
- - "L0.3088[55,64] 1.01us 9mb |------------L0.3088------------| "
- - "L0.3186[65,65] 1.01us 0b |L0.3186| "
- - "L0.3187[66,66] 1.01us 1mb |L0.3187| "
- - "L0.3182[67,75] 1.01us 9mb |----------L0.3182-----------| "
- - "L0.3183[76,76] 1.01us 1mb |L0.3183|"
- - "L0.3188[77,77] 1.01us 0b |L0.3188|"
- - "L1 "
- - "L1.3178[53,64] 913ns 94mb|----------------L1.3178----------------| "
- - "L1.3179[65,65] 913ns 9mb |L1.3179| "
- - "L1.3180[66,75] 913ns 77mb |------------L1.3180------------| "
- - "L1.3181[76,77] 913ns 17mb |L1.3181|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 219mb total:"
- - "L1 "
- - "L1.?[53,64] 1.01us 100mb |-----------------L1.?------------------| "
- - "L1.?[65,75] 1.01us 91mb |---------------L1.?----------------| "
- - "L1.?[76,77] 1.01us 27mb |L1.?|"
- - "Committing partition 1:"
- - " Soft Deleting 11 files: L0.3088, L0.3140, L1.3178, L1.3179, L1.3180, L1.3181, L0.3182, L0.3183, L0.3186, L0.3187, L0.3188"
- - " Creating 3 files"
- - "**** Simulation run 491, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[87, 96]). 9 Input Files, 248mb total:"
- - "L0 "
- - "L0.3189[78,78] 1.01us 2mb|L0.3189| "
- - "L0.3056[79,84] 1.01us 6mb |-----L0.3056------| "
- - "L0.3162[85,87] 1.01us 5mb |L0.3162| "
- - "L0.3166[88,91] 1.01us 8mb |-L0.3166--| "
- - "L0.3167[92,97] 1.01us 17mb |-----L0.3167------| "
- - "L0.3065[98,100] 1.01us 8mb |L0.3065|"
- - "L1 "
- - "L1.3177[78,87] 913ns 94mb|-------------L1.3177--------------| "
- - "L1.3161[88,91] 813ns 31mb |-L1.3161--| "
- - "L1.3151[92,100] 813ns 78mb |-----------L1.3151------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 248mb total:"
- - "L1 "
- - "L1.?[78,87] 1.01us 101mb |---------------L1.?---------------| "
- - "L1.?[88,96] 1.01us 90mb |-------------L1.?-------------| "
- - "L1.?[97,100] 1.01us 56mb |---L1.?---| "
- - "Committing partition 1:"
- - " Soft Deleting 9 files: L0.3056, L0.3065, L1.3151, L1.3161, L0.3162, L0.3166, L0.3167, L1.3177, L0.3189"
- - " Creating 3 files"
- - "**** Simulation run 492, type=split(ReduceOverlap)(split_times=[96]). 1 Input Files, 874kb total:"
- - "L0, all files 874kb "
- - "L0.3171[92,100] 1.02us |----------------------------------------L0.3171-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 874kb total:"
- - "L0, all files 437kb "
- - "L0.?[92,96] 1.02us |-------------------L0.?--------------------| "
- - "L0.?[97,100] 1.02us |-------------L0.?--------------| "
- - "Committing partition 1:"
- - " Soft Deleting 1 files: L0.3171"
- - " Creating 2 files"
- - "**** Simulation run 493, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[35, 46]). 11 Input Files, 275mb total:"
- - "L0 "
- - "L0.3197[24,36] 1.01us 11mb|--------------L0.3197---------------| "
- - "L0.3198[37,37] 1.01us 977kb |L0.3198| "
- - "L0.3141[38,38] 1.01us 0b |L0.3141| "
- - "L0.3142[39,44] 1.01us 7mb |---L0.3142----| "
- - "L0.3104[45,45] 1.01us 0b |L0.3104| "
- - "L0.3105[46,48] 1.01us 4mb |L0.3105| "
- - "L0.3131[49,50] 1.01us 1mb |L0.3131| "
- - "L0.3143[51,52] 1.01us 2mb |L0.3143|"
- - "L1 "
- - "L1.3194[24,36] 914ns 107mb|--------------L1.3194---------------| "
- - "L1.3195[37,48] 914ns 98mb |-------------L1.3195-------------| "
- - "L1.3196[49,52] 914ns 45mb |L1.3196| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 275mb total:"
- - "L1 "
- - "L1.?[24,35] 1.01us 108mb |--------------L1.?---------------| "
- - "L1.?[36,46] 1.01us 98mb |-------------L1.?-------------| "
- - "L1.?[47,52] 1.01us 69mb |-----L1.?-----| "
- - "Committing partition 1:"
- - " Soft Deleting 11 files: L0.3104, L0.3105, L0.3131, L0.3141, L0.3142, L0.3143, L1.3194, L1.3195, L1.3196, L0.3197, L0.3198"
- - " Creating 3 files"
- - "**** Simulation run 494, type=split(ReduceOverlap)(split_times=[35]). 1 Input Files, 760kb total:"
- - "L0, all files 760kb "
- - "L0.3199[24,36] 1.02us |----------------------------------------L0.3199-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 760kb total:"
- - "L0 "
- - "L0.?[24,35] 1.02us 697kb |--------------------------------------L0.?--------------------------------------| "
- - "L0.?[36,36] 1.02us 63kb |L0.?|"
- - "**** Simulation run 495, type=split(ReduceOverlap)(split_times=[46]). 1 Input Files, 158kb total:"
- - "L0, all files 158kb "
- - "L0.3201[46,48] 1.02us |----------------------------------------L0.3201-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 158kb total:"
- - "L0 "
- - "L0.?[46,46] 1.02us 0b |L0.?| "
- - "L0.?[47,48] 1.02us 158kb |-------------------L0.?--------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L0.3199, L0.3201"
- - " Creating 4 files"
- - "**** Simulation run 496, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[61]). 2 Input Files, 103mb total:"
- - "L0 "
- - "L0.3144[53,54] 1.01us 3mb|L0.3144| "
- - "L1 "
- - "L1.3203[53,64] 1.01us 100mb|----------------------------------------L1.3203-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 103mb total:"
- - "L1 "
- - "L1.?[53,61] 1.01us 75mb |-----------------------------L1.?------------------------------| "
- - "L1.?[62,64] 1.01us 28mb |-----L1.?-----| "
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L0.3144, L1.3203"
- - " Creating 2 files"
- - "**** Simulation run 497, type=split(ReduceOverlap)(split_times=[61]). 1 Input Files, 615kb total:"
- - "L0, all files 615kb "
- - "L0.3090[55,64] 1.02us |----------------------------------------L0.3090-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 615kb total:"
- - "L0 "
- - "L0.?[55,61] 1.02us 410kb |---------------------------L0.?---------------------------| "
- - "L0.?[62,64] 1.02us 205kb |-------L0.?-------|"
- - "Committing partition 1:"
- - " Soft Deleting 1 files: L0.3090"
- - " Creating 2 files"
- - "**** Simulation run 498, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[35, 46]). 13 Input Files, 277mb total:"
- - "L0 "
- - "L0.3214[24,35] 1.02us 697kb|-------------L0.3214-------------| "
- - "L0.3215[36,36] 1.02us 63kb |L0.3215| "
- - "L0.3200[37,37] 1.02us 63kb |L0.3200| "
- - "L0.3145[38,38] 1.02us 0b |L0.3145| "
- - "L0.3146[39,44] 1.02us 443kb |---L0.3146----| "
- - "L0.3106[45,45] 1.02us 0b |L0.3106| "
- - "L0.3216[46,46] 1.02us 0b |L0.3216| "
- - "L0.3217[47,48] 1.02us 158kb |L0.3217| "
- - "L0.3202[49,50] 1.02us 158kb |L0.3202| "
- - "L0.3147[51,52] 1.02us 106kb |L0.3147|"
- - "L1 "
- - "L1.3211[24,35] 1.01us 108mb|-------------L1.3211-------------| "
- - "L1.3212[36,46] 1.01us 98mb |-----------L1.3212------------| "
- - "L1.3213[47,52] 1.01us 69mb |---L1.3213----| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 277mb total:"
- - "L1 "
- - "L1.?[24,35] 1.02us 109mb |--------------L1.?---------------| "
- - "L1.?[36,46] 1.02us 99mb |-------------L1.?-------------| "
- - "L1.?[47,52] 1.02us 69mb |-----L1.?-----| "
- - "Committing partition 1:"
- - " Soft Deleting 13 files: L0.3106, L0.3145, L0.3146, L0.3147, L0.3200, L0.3202, L1.3211, L1.3212, L1.3213, L0.3214, L0.3215, L0.3216, L0.3217"
- - " Creating 3 files"
- - "**** Simulation run 499, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[64, 75]). 12 Input Files, 223mb total:"
- - "L0 "
- - "L0.3148[53,54] 1.02us 211kb|L0.3148| "
- - "L0.3220[55,61] 1.02us 410kb |------L0.3220-------| "
- - "L0.3221[62,64] 1.02us 205kb |L0.3221| "
- - "L0.3190[65,65] 1.02us 0b |L0.3190| "
- - "L0.3191[66,66] 1.02us 72kb |L0.3191| "
- - "L0.3184[67,75] 1.02us 609kb |----------L0.3184-----------| "
- - "L0.3185[76,76] 1.02us 76kb |L0.3185|"
- - "L0.3192[77,77] 1.02us 0b |L0.3192|"
- - "L1 "
- - "L1.3218[53,61] 1.01us 75mb|----------L1.3218-----------| "
- - "L1.3219[62,64] 1.01us 28mb |L1.3219| "
- - "L1.3204[65,75] 1.01us 91mb |--------------L1.3204--------------| "
- - "L1.3205[76,77] 1.01us 27mb |L1.3205|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 223mb total:"
- - "L1 "
- - "L1.?[53,64] 1.02us 102mb |-----------------L1.?------------------| "
- - "L1.?[65,75] 1.02us 93mb |---------------L1.?----------------| "
- - "L1.?[76,77] 1.02us 28mb |L1.?|"
- - "Committing partition 1:"
- - " Soft Deleting 12 files: L0.3148, L0.3184, L0.3185, L0.3190, L0.3191, L0.3192, L1.3204, L1.3205, L1.3218, L1.3219, L0.3220, L0.3221"
- - " Creating 3 files"
- - "**** Simulation run 500, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[87, 96]). 9 Input Files, 250mb total:"
- - "L0 "
- - "L0.3210[97,100] 1.02us 437kb |-L0.3210--| "
- - "L0.3209[92,96] 1.02us 437kb |---L0.3209----| "
- - "L0.3170[88,91] 1.02us 291kb |-L0.3170--| "
- - "L0.3164[85,87] 1.02us 179kb |L0.3164| "
- - "L0.3169[79,84] 1.02us 522kb |-----L0.3169------| "
- - "L0.3193[78,78] 1.02us 87kb|L0.3193| "
- - "L1 "
- - "L1.3206[78,87] 1.01us 101mb|-------------L1.3206--------------| "
- - "L1.3208[97,100] 1.01us 56mb |-L1.3208--| "
- - "L1.3207[88,96] 1.01us 90mb |-----------L1.3207------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 250mb total:"
- - "L1 "
- - "L1.?[78,87] 1.02us 102mb |---------------L1.?---------------| "
- - "L1.?[88,96] 1.02us 91mb |-------------L1.?-------------| "
- - "L1.?[97,100] 1.02us 57mb |---L1.?---| "
- - "Committing partition 1:"
- - " Soft Deleting 9 files: L0.3164, L0.3169, L0.3170, L0.3193, L1.3206, L1.3207, L1.3208, L0.3209, L0.3210"
- - " Creating 3 files"
- - "**** Simulation run 501, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[47, 58]). 3 Input Files, 271mb total:"
- - "L1 "
- - "L1.3223[36,46] 1.02us 99mb|-----------L1.3223------------| "
- - "L1.3224[47,52] 1.02us 69mb |---L1.3224----| "
- - "L1.3225[53,64] 1.02us 102mb |-------------L1.3225-------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 271mb total:"
- - "L2 "
- - "L2.?[36,47] 1.02us 106mb |--------------L2.?---------------| "
- - "L2.?[48,58] 1.02us 97mb |-------------L2.?-------------| "
- - "L2.?[59,64] 1.02us 68mb |-----L2.?-----| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L1.3223, L1.3224, L1.3225"
- - " Upgrading 1 files level to CompactionLevel::L2: L1.3222"
- - " Creating 3 files"
- - "**** Simulation run 502, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[75, 85]). 3 Input Files, 223mb total:"
- - "L1 "
- - "L1.3226[65,75] 1.02us 93mb|---------------L1.3226----------------| "
- - "L1.3227[76,77] 1.02us 28mb |L1.3227| "
- - "L1.3228[78,87] 1.02us 102mb |-------------L1.3228--------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 223mb total:"
- - "L2 "
- - "L2.?[65,75] 1.02us 101mb |-----------------L2.?-----------------| "
- - "L2.?[76,85] 1.02us 91mb |---------------L2.?---------------| "
- - "L2.?[86,87] 1.02us 30mb |L2.?|"
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L1.3226, L1.3227, L1.3228"
- - " Creating 3 files"
- - "**** Simulation run 503, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[97]). 2 Input Files, 148mb total:"
- - "L1 "
- - "L1.3230[97,100] 1.02us 57mb |------L1.3230-------| "
- - "L1.3229[88,96] 1.02us 91mb|-------------------------L1.3229--------------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 148mb total:"
- - "L2 "
- - "L2.?[88,97] 1.02us 111mb |------------------------------L2.?-------------------------------| "
- - "L2.?[98,100] 1.02us 37mb |----L2.?-----|"
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L1.3229, L1.3230"
- - " Creating 2 files"
- "**** Final Output Files (7.41gb written)"
- "L2 "
- "L2.3222[24,35] 1.02us 109mb|--L2.3222--| "
diff --git a/compactor/tests/layouts/stuck.rs b/compactor/tests/layouts/stuck.rs
index 8cff3f38d1..3fc523ee75 100644
--- a/compactor/tests/layouts/stuck.rs
+++ b/compactor/tests/layouts/stuck.rs
@@ -12,7 +12,7 @@ use crate::layouts::{layout_setup_builder, parquet_builder, run_layout_scenario,
const MAX_DESIRED_FILE_SIZE: u64 = 100 * ONE_MB;
#[tokio::test]
-async fn stuck() {
+async fn stuck_l0() {
test_helpers::maybe_start_logging();
let setup = layout_setup_builder()
@@ -20,6 +20,7 @@ async fn stuck() {
.with_max_num_files_per_plan(20)
.with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE)
.with_partition_timeout(Duration::from_millis(10000))
+ .with_suppress_run_output() // remove this to debug
.build()
.await;
@@ -1110,6877 +1111,6 @@ async fn stuck() {
- "L2.59[1686863759000000000,1686867839000000000] 1686928811.43s 96mb |--L2.59--| "
- "L2.74[1686867899000000000,1686868319000000000] 1686928811.43s 14mb |L2.74| "
- "L2.78[1686868379000000000,1686873599000000000] 1686928118.43s 39mb |---L2.78----| "
- - "**** Simulation run 0, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240]). 1 Input Files, 5mb total:"
- - "L1, all files 5mb "
- - "L1.6[1686841379000000000,1686845579000000000] 1686928854.57s|------------------------------------------L1.6------------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 5mb total:"
- - "L1 "
- - "L1.?[1686841379000000000,1686842249810810810] 1686928854.57s 1mb|------L1.?------| "
- - "L1.?[1686842249810810811,1686843120621621620] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686843120621621621,1686843991432432430] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686843991432432431,1686844862243243240] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686844862243243241,1686845579000000000] 1686928854.57s 947kb |----L1.?-----| "
- - "**** Simulation run 1, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530]). 1 Input Files, 144mb total:"
- - "L0, all files 144mb "
- - "L0.13[1686841379000000000,1686853319000000000] 1686929421.02s|-----------------------------------------L0.13------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 144mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686929421.02s 10mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686929421.02s 10mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686929421.02s 10mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686929421.02s 10mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686929421.02s 10mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686929421.02s 10mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686929421.02s 10mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686929421.02s 10mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686929421.02s 10mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686929421.02s 10mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686929421.02s 10mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686929421.02s 10mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686929421.02s 10mb |L0.?| "
- - "L0.?[1686852699540540531,1686853319000000000] 1686929421.02s 7mb |L0.?|"
- - "**** Simulation run 2, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530]). 1 Input Files, 72mb total:"
- - "L0, all files 72mb "
- - "L0.15[1686841379000000000,1686852839000000000] 1686929712.33s|-----------------------------------------L0.15------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 72mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686929712.33s 5mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686929712.33s 5mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686929712.33s 5mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686929712.33s 5mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686929712.33s 5mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686929712.33s 5mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686929712.33s 5mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686929712.33s 5mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686929712.33s 5mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686929712.33s 5mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686929712.33s 5mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686929712.33s 5mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686929712.33s 5mb |L0.?| "
- - "L0.?[1686852699540540531,1686852839000000000] 1686929712.33s 898kb |L0.?|"
- - "**** Simulation run 3, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530, 1686853570351351340, 1686854441162162150]). 1 Input Files, 84mb total:"
- - "L0, all files 84mb "
- - "L0.9[1686841379000000000,1686855059000000000] 1686929965.33s|------------------------------------------L0.9------------------------------------------|"
- - "**** 16 Output Files (parquet_file_id not yet assigned), 84mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686929965.33s 5mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686929965.33s 5mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686929965.33s 5mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686929965.33s 5mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686929965.33s 5mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686929965.33s 5mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686929965.33s 5mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686929965.33s 5mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686929965.33s 5mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686929965.33s 5mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686929965.33s 5mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686929965.33s 5mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686929965.33s 5mb |L0.?| "
- - "L0.?[1686852699540540531,1686853570351351340] 1686929965.33s 5mb |L0.?| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686929965.33s 5mb |L0.?| "
- - "L0.?[1686854441162162151,1686855059000000000] 1686929965.33s 4mb |L0.?|"
- - "**** Simulation run 4, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530]). 1 Input Files, 143mb total:"
- - "L0, all files 143mb "
- - "L0.1[1686841379000000000,1686853019000000000] 1686930563.07s|------------------------------------------L0.1------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 143mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686930563.07s 11mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686930563.07s 11mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686930563.07s 11mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686930563.07s 11mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686930563.07s 11mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686930563.07s 11mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686930563.07s 11mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686930563.07s 11mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686930563.07s 11mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686930563.07s 11mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686930563.07s 11mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686930563.07s 11mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686930563.07s 11mb |L0.?| "
- - "L0.?[1686852699540540531,1686853019000000000] 1686930563.07s 4mb |L0.?|"
- - "**** Simulation run 5, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530]). 1 Input Files, 55mb total:"
- - "L0, all files 55mb "
- - "L0.14[1686841379000000000,1686853379000000000] 1686930780.95s|-----------------------------------------L0.14------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 55mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686930780.95s 4mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686930780.95s 4mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686930780.95s 4mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686930780.95s 4mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686930780.95s 4mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686930780.95s 4mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686930780.95s 4mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686930780.95s 4mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686930780.95s 4mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686930780.95s 4mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686930780.95s 4mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686930780.95s 4mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686930780.95s 4mb |L0.?| "
- - "L0.?[1686852699540540531,1686853379000000000] 1686930780.95s 3mb |L0.?|"
- - "**** Simulation run 6, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530]). 1 Input Files, 143mb total:"
- - "L0, all files 143mb "
- - "L0.12[1686841379000000000,1686853079000000000] 1686931336.08s|-----------------------------------------L0.12------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 143mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686931336.08s 11mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686931336.08s 11mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686931336.08s 11mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686931336.08s 11mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686931336.08s 11mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686931336.08s 11mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686931336.08s 11mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686931336.08s 11mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686931336.08s 11mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686931336.08s 11mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686931336.08s 11mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686931336.08s 11mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686931336.08s 11mb |L0.?| "
- - "L0.?[1686852699540540531,1686853079000000000] 1686931336.08s 5mb |L0.?|"
- - "**** Simulation run 7, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530]). 1 Input Files, 70mb total:"
- - "L0, all files 70mb "
- - "L0.17[1686841379000000000,1686852899000000000] 1686931600.58s|-----------------------------------------L0.17------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 70mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686931600.58s 5mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686931600.58s 5mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686931600.58s 5mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686931600.58s 5mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686931600.58s 5mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686931600.58s 5mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686931600.58s 5mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686931600.58s 5mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686931600.58s 5mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686931600.58s 5mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686931600.58s 5mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686931600.58s 5mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686931600.58s 5mb |L0.?| "
- - "L0.?[1686852699540540531,1686852899000000000] 1686931600.58s 1mb |L0.?|"
- - "**** Simulation run 8, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530, 1686853570351351340, 1686854441162162150, 1686855311972972960]). 1 Input Files, 86mb total:"
- - "L0, all files 86mb "
- - "L0.10[1686841379000000000,1686855659000000000] 1686931893.7s|-----------------------------------------L0.10------------------------------------------|"
- - "**** 17 Output Files (parquet_file_id not yet assigned), 86mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686931893.7s 5mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686931893.7s 5mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686931893.7s 5mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686931893.7s 5mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686931893.7s 5mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686931893.7s 5mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686931893.7s 5mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686931893.7s 5mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686931893.7s 5mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686931893.7s 5mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686931893.7s 5mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686931893.7s 5mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686931893.7s 5mb |L0.?| "
- - "L0.?[1686852699540540531,1686853570351351340] 1686931893.7s 5mb |L0.?| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686931893.7s 5mb |L0.?| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686931893.7s 5mb |L0.?| "
- - "L0.?[1686855311972972961,1686855659000000000] 1686931893.7s 2mb |L0.?|"
- - "**** Simulation run 9, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.21[1686841379000000000,1686849719000000000] 1686932458.05s|-----------------------------------------L0.21------------------------------------------|"
- - "**** 10 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686932458.05s 10mb|-L0.?--| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686932458.05s 10mb |-L0.?--| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686932458.05s 10mb |-L0.?--| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686932458.05s 10mb |-L0.?--| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686932458.05s 10mb |-L0.?--| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686932458.05s 10mb |-L0.?--| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686932458.05s 10mb |-L0.?--| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686932458.05s 10mb |-L0.?--| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686932458.05s 10mb |-L0.?--| "
- - "L0.?[1686849216297297291,1686849719000000000] 1686932458.05s 6mb |L0.?|"
- - "**** Simulation run 10, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530, 1686853570351351340]). 1 Input Files, 64mb total:"
- - "L0, all files 64mb "
- - "L0.23[1686841379000000000,1686854219000000000] 1686932677.39s|-----------------------------------------L0.23------------------------------------------|"
- - "**** 15 Output Files (parquet_file_id not yet assigned), 64mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686932677.39s 4mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686932677.39s 4mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686932677.39s 4mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686932677.39s 4mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686932677.39s 4mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686932677.39s 4mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686932677.39s 4mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686932677.39s 4mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686932677.39s 4mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686932677.39s 4mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686932677.39s 4mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686932677.39s 4mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686932677.39s 4mb |L0.?| "
- - "L0.?[1686852699540540531,1686853570351351340] 1686932677.39s 4mb |L0.?| "
- - "L0.?[1686853570351351341,1686854219000000000] 1686932677.39s 3mb |L0.?|"
- - "**** Simulation run 11, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530]). 1 Input Files, 142mb total:"
- - "L0, all files 142mb "
- - "L0.16[1686841379000000000,1686853019000000000] 1686933271.57s|-----------------------------------------L0.16------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 142mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686933271.57s 11mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686933271.57s 11mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686933271.57s 11mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686933271.57s 11mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686933271.57s 11mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686933271.57s 11mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686933271.57s 11mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686933271.57s 11mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686933271.57s 11mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686933271.57s 11mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686933271.57s 11mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686933271.57s 11mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686933271.57s 11mb |L0.?| "
- - "L0.?[1686852699540540531,1686853019000000000] 1686933271.57s 4mb |L0.?|"
- - "**** Simulation run 12, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530]). 1 Input Files, 71mb total:"
- - "L0, all files 71mb "
- - "L0.18[1686841379000000000,1686852959000000000] 1686933528.17s|-----------------------------------------L0.18------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 71mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686933528.17s 5mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686933528.17s 5mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686933528.17s 5mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686933528.17s 5mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686933528.17s 5mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686933528.17s 5mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686933528.17s 5mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686933528.17s 5mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686933528.17s 5mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686933528.17s 5mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686933528.17s 5mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686933528.17s 5mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686933528.17s 5mb |L0.?| "
- - "L0.?[1686852699540540531,1686852959000000000] 1686933528.17s 2mb |L0.?|"
- - "**** Simulation run 13, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530, 1686853570351351340, 1686854441162162150]). 1 Input Files, 85mb total:"
- - "L0, all files 85mb "
- - "L0.19[1686841379000000000,1686855119000000000] 1686933830.06s|-----------------------------------------L0.19------------------------------------------|"
- - "**** 16 Output Files (parquet_file_id not yet assigned), 85mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686933830.06s 5mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686933830.06s 5mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686933830.06s 5mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686933830.06s 5mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686933830.06s 5mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686933830.06s 5mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686933830.06s 5mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686933830.06s 5mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686933830.06s 5mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686933830.06s 5mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686933830.06s 5mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686933830.06s 5mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686933830.06s 5mb |L0.?| "
- - "L0.?[1686852699540540531,1686853570351351340] 1686933830.06s 5mb |L0.?| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686933830.06s 5mb |L0.?| "
- - "L0.?[1686854441162162151,1686855119000000000] 1686933830.06s 4mb |L0.?|"
- - "**** Simulation run 14, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720]). 1 Input Files, 101mb total:"
- - "L0, all files 101mb "
- - "L0.20[1686841379000000000,1686852119000000000] 1686934254.96s|-----------------------------------------L0.20------------------------------------------|"
- - "**** 13 Output Files (parquet_file_id not yet assigned), 101mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686934254.96s 8mb|L0.?-| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686851828729729721,1686852119000000000] 1686934254.96s 3mb |L0.?|"
- - "**** Simulation run 15, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530, 1686853570351351340]). 1 Input Files, 143mb total:"
- - "L0, all files 143mb "
- - "L0.22[1686841379000000000,1686853679000000000] 1686934759.75s|-----------------------------------------L0.22------------------------------------------|"
- - "**** 15 Output Files (parquet_file_id not yet assigned), 143mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686934759.75s 10mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686934759.75s 10mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686934759.75s 10mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686934759.75s 10mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686934759.75s 10mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686934759.75s 10mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686934759.75s 10mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686934759.75s 10mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686934759.75s 10mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686934759.75s 10mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686934759.75s 10mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686934759.75s 10mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686934759.75s 10mb |L0.?| "
- - "L0.?[1686852699540540531,1686853570351351340] 1686934759.75s 10mb |L0.?| "
- - "L0.?[1686853570351351341,1686853679000000000] 1686934759.75s 1mb |L0.?|"
- - "**** Simulation run 16, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530]). 1 Input Files, 72mb total:"
- - "L0, all files 72mb "
- - "L0.11[1686841379000000000,1686852899000000000] 1686934966.48s|-----------------------------------------L0.11------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 72mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686934966.48s 5mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686934966.48s 5mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686934966.48s 5mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686934966.48s 5mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686934966.48s 5mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686934966.48s 5mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686934966.48s 5mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686934966.48s 5mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686934966.48s 5mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686934966.48s 5mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686934966.48s 5mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686934966.48s 5mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686934966.48s 5mb |L0.?| "
- - "L0.?[1686852699540540531,1686852899000000000] 1686934966.48s 1mb |L0.?|"
- - "**** Simulation run 17, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530, 1686853570351351340, 1686854441162162150, 1686855311972972960]). 1 Input Files, 83mb total:"
- - "L0, all files 83mb "
- - "L0.8[1686841379000000000,1686855419000000000] 1686935151.54s|------------------------------------------L0.8------------------------------------------|"
- - "**** 17 Output Files (parquet_file_id not yet assigned), 83mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686935151.54s 5mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686935151.54s 5mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686935151.54s 5mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686935151.54s 5mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686935151.54s 5mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686935151.54s 5mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686935151.54s 5mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686935151.54s 5mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686935151.54s 5mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686935151.54s 5mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686935151.54s 5mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686935151.54s 5mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686935151.54s 5mb |L0.?| "
- - "L0.?[1686852699540540531,1686853570351351340] 1686935151.54s 5mb |L0.?| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686935151.54s 5mb |L0.?| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686935151.54s 5mb |L0.?| "
- - "L0.?[1686855311972972961,1686855419000000000] 1686935151.54s 649kb |L0.?|"
- - "**** Simulation run 18, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530]). 1 Input Files, 144mb total:"
- - "L0, all files 144mb "
- - "L0.3[1686841379000000000,1686853319000000000] 1686935546.05s|------------------------------------------L0.3------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 144mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686935546.05s 10mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686935546.05s 10mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686935546.05s 10mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686935546.05s 10mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686935546.05s 10mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686935546.05s 10mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686935546.05s 10mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686935546.05s 10mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686935546.05s 10mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686935546.05s 10mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686935546.05s 10mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686935546.05s 10mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686935546.05s 10mb |L0.?| "
- - "L0.?[1686852699540540531,1686853319000000000] 1686935546.05s 7mb |L0.?|"
- - "**** Simulation run 19, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530]). 1 Input Files, 72mb total:"
- - "L0, all files 72mb "
- - "L0.7[1686841379000000000,1686852839000000000] 1686935742.51s|------------------------------------------L0.7------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 72mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686935742.51s 5mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686935742.51s 5mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686935742.51s 5mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686935742.51s 5mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686935742.51s 5mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686935742.51s 5mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686935742.51s 5mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686935742.51s 5mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686935742.51s 5mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686935742.51s 5mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686935742.51s 5mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686935742.51s 5mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686935742.51s 5mb |L0.?| "
- - "L0.?[1686852699540540531,1686852839000000000] 1686935742.51s 899kb |L0.?|"
- - "**** Simulation run 20, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530, 1686853570351351340, 1686854441162162150]). 1 Input Files, 83mb total:"
- - "L0, all files 83mb "
- - "L0.5[1686841379000000000,1686854759000000000] 1686935947.46s|------------------------------------------L0.5------------------------------------------|"
- - "**** 16 Output Files (parquet_file_id not yet assigned), 83mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686935947.46s 5mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686935947.46s 5mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686935947.46s 5mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686935947.46s 5mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686935947.46s 5mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686935947.46s 5mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686935947.46s 5mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686935947.46s 5mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686935947.46s 5mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686935947.46s 5mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686935947.46s 5mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686935947.46s 5mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686935947.46s 5mb |L0.?| "
- - "L0.?[1686852699540540531,1686853570351351340] 1686935947.46s 5mb |L0.?| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686935947.46s 5mb |L0.?| "
- - "L0.?[1686854441162162151,1686854759000000000] 1686935947.46s 2mb |L0.?|"
- - "**** Simulation run 21, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842249810810810, 1686843120621621620, 1686843991432432430, 1686844862243243240, 1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290, 1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530, 1686853570351351340, 1686854441162162150, 1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060, 1686864890891891870, 1686865761702702680, 1686866632513513490, 1686867503324324300, 1686868374135135110, 1686869244945945920, 1686870115756756730, 1686870986567567540]). 1 Input Files, 98mb total:"
- - "L0, all files 98mb "
- - "L0.4[1686841379000000000,1686871559000000000] 1686936871.55s|------------------------------------------L0.4------------------------------------------|"
- - "**** 35 Output Files (parquet_file_id not yet assigned), 98mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686936871.55s 3mb|L0.?| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686843120621621621,1686843991432432430] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686844862243243241,1686845733054054050] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686846603864864861,1686847474675675670] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686848345486486481,1686849216297297290] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686849216297297291,1686850087108108100] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686852699540540531,1686853570351351340] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686864020081081061,1686864890891891870] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686864890891891871,1686865761702702680] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686936871.55s 3mb |L0.?| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686936871.55s 3mb |L0.?|"
- - "L0.?[1686870986567567541,1686871559000000000] 1686936871.55s 2mb |L0.?|"
- - "**** Simulation run 22, type=split(HighL0OverlapTotalBacklog)(split_times=[1686845733054054050, 1686846603864864860, 1686847474675675670, 1686848345486486480, 1686849216297297290]). 1 Input Files, 5mb total:"
- - "L1, all files 5mb "
- - "L1.24[1686845639000000000,1686849779000000000] 1686928854.57s|-----------------------------------------L1.24------------------------------------------|"
- - "**** 6 Output Files (parquet_file_id not yet assigned), 5mb total:"
- - "L1 "
- - "L1.?[1686845639000000000,1686845733054054050] 1686928854.57s 123kb|L1.?| "
- - "L1.?[1686845733054054051,1686846603864864860] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686846603864864861,1686847474675675670] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686847474675675671,1686848345486486480] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686848345486486481,1686849216297297290] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686849216297297291,1686849779000000000] 1686928854.57s 734kb |---L1.?---| "
- - "**** Simulation run 23, type=split(HighL0OverlapTotalBacklog)(split_times=[1686850087108108100, 1686850957918918910, 1686851828729729720, 1686852699540540530, 1686853570351351340, 1686854441162162150, 1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.26[1686849779000000000,1686858119000000000] 1686932458.05s|-----------------------------------------L0.26------------------------------------------|"
- - "**** 11 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686849779000000000,1686850087108108100] 1686932458.05s 4mb|L0.?| "
- - "L0.?[1686850087108108101,1686850957918918910] 1686932458.05s 10mb |-L0.?--| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686932458.05s 10mb |-L0.?--| "
- - "L0.?[1686851828729729721,1686852699540540530] 1686932458.05s 10mb |-L0.?--| "
- - "L0.?[1686852699540540531,1686853570351351340] 1686932458.05s 10mb |-L0.?--| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686932458.05s 10mb |-L0.?--| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686932458.05s 10mb |-L0.?--| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686932458.05s 10mb |-L0.?--| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686932458.05s 10mb |-L0.?--| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686932458.05s 10mb |-L0.?--| "
- - "L0.?[1686857924405405391,1686858119000000000] 1686932458.05s 2mb |L0.?|"
- - "**** Simulation run 24, type=split(HighL0OverlapTotalBacklog)(split_times=[1686850087108108100]). 1 Input Files, 975kb total:"
- - "L1, all files 975kb "
- - "L1.28[1686849839000000000,1686850559000000000] 1686928854.57s|-----------------------------------------L1.28------------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 975kb total:"
- - "L1 "
- - "L1.?[1686849839000000000,1686850087108108100] 1686928854.57s 336kb|------------L1.?-------------| "
- - "L1.?[1686850087108108101,1686850559000000000] 1686928854.57s 639kb |--------------------------L1.?--------------------------| "
- - "**** Simulation run 25, type=split(HighL0OverlapTotalBacklog)(split_times=[1686850957918918910, 1686851828729729720, 1686852699540540530, 1686853570351351340, 1686854441162162150]). 1 Input Files, 5mb total:"
- - "L1, all files 5mb "
- - "L1.29[1686850619000000000,1686854819000000000] 1686928854.57s|-----------------------------------------L1.29------------------------------------------|"
- - "**** 6 Output Files (parquet_file_id not yet assigned), 5mb total:"
- - "L1 "
- - "L1.?[1686850619000000000,1686850957918918910] 1686928854.57s 440kb|L1.?-| "
- - "L1.?[1686850957918918911,1686851828729729720] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686851828729729721,1686852699540540530] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686852699540540531,1686853570351351340] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686853570351351341,1686854441162162150] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686854441162162151,1686854819000000000] 1686928854.57s 490kb |-L1.?-| "
- - "**** Simulation run 26, type=split(HighL0OverlapTotalBacklog)(split_times=[1686852699540540530, 1686853570351351340, 1686854441162162150, 1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440]). 1 Input Files, 101mb total:"
- - "L0, all files 101mb "
- - "L0.31[1686852179000000000,1686862859000000000] 1686934254.96s|-----------------------------------------L0.31------------------------------------------|"
- - "**** 13 Output Files (parquet_file_id not yet assigned), 101mb total:"
- - "L0 "
- - "L0.?[1686852179000000000,1686852699540540530] 1686934254.96s 5mb|L0.?| "
- - "L0.?[1686852699540540531,1686853570351351340] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686862278459459441,1686862859000000000] 1686934254.96s 5mb |L0.?|"
- - "**** Simulation run 27, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853570351351340, 1686854441162162150, 1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060]). 1 Input Files, 133mb total:"
- - "L0, all files 133mb "
- - "L0.33[1686852899000000000,1686864359000000000] 1686929712.33s|-----------------------------------------L0.33------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 133mb total:"
- - "L0 "
- - "L0.?[1686852899000000000,1686853570351351340] 1686929712.33s 8mb|L0.?| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686929712.33s 10mb |L0.?| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686929712.33s 10mb |L0.?| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686929712.33s 10mb |L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686929712.33s 10mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686929712.33s 10mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686929712.33s 10mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686929712.33s 10mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686929712.33s 10mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686929712.33s 10mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686929712.33s 10mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686929712.33s 10mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686929712.33s 10mb |L0.?| "
- - "L0.?[1686864020081081061,1686864359000000000] 1686929712.33s 4mb |L0.?|"
- - "**** Simulation run 28, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853570351351340, 1686854441162162150, 1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060]). 1 Input Files, 133mb total:"
- - "L0, all files 133mb "
- - "L0.32[1686852899000000000,1686864359000000000] 1686935742.51s|-----------------------------------------L0.32------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 133mb total:"
- - "L0 "
- - "L0.?[1686852899000000000,1686853570351351340] 1686935742.51s 8mb|L0.?| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686935742.51s 10mb |L0.?| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686935742.51s 10mb |L0.?| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686935742.51s 10mb |L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686935742.51s 10mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686935742.51s 10mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686935742.51s 10mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686935742.51s 10mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686935742.51s 10mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686935742.51s 10mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686935742.51s 10mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686935742.51s 10mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686935742.51s 10mb |L0.?| "
- - "L0.?[1686864020081081061,1686864359000000000] 1686935742.51s 4mb |L0.?|"
- - "**** Simulation run 29, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853570351351340, 1686854441162162150, 1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060]). 1 Input Files, 131mb total:"
- - "L0, all files 131mb "
- - "L0.34[1686852959000000000,1686864419000000000] 1686931600.58s|-----------------------------------------L0.34------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 131mb total:"
- - "L0 "
- - "L0.?[1686852959000000000,1686853570351351340] 1686931600.58s 7mb|L0.?| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686931600.58s 10mb |L0.?| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686931600.58s 10mb |L0.?| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686931600.58s 10mb |L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686931600.58s 10mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686931600.58s 10mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686931600.58s 10mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686931600.58s 10mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686931600.58s 10mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686931600.58s 10mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686931600.58s 10mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686931600.58s 10mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686931600.58s 10mb |L0.?| "
- - "L0.?[1686864020081081061,1686864419000000000] 1686931600.58s 5mb |L0.?|"
- - "**** Simulation run 30, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853570351351340, 1686854441162162150, 1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060]). 1 Input Files, 133mb total:"
- - "L0, all files 133mb "
- - "L0.35[1686852959000000000,1686864419000000000] 1686934966.48s|-----------------------------------------L0.35------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 133mb total:"
- - "L0 "
- - "L0.?[1686852959000000000,1686853570351351340] 1686934966.48s 7mb|L0.?| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686934966.48s 10mb |L0.?| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686934966.48s 10mb |L0.?| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686934966.48s 10mb |L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686934966.48s 10mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686934966.48s 10mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686934966.48s 10mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686934966.48s 10mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686934966.48s 10mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686934966.48s 10mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686934966.48s 10mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686934966.48s 10mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686934966.48s 10mb |L0.?| "
- - "L0.?[1686864020081081061,1686864419000000000] 1686934966.48s 5mb |L0.?|"
- - "**** Simulation run 31, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853570351351340, 1686854441162162150, 1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060]). 1 Input Files, 132mb total:"
- - "L0, all files 132mb "
- - "L0.36[1686853019000000000,1686864599000000000] 1686933528.17s|-----------------------------------------L0.36------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 132mb total:"
- - "L0 "
- - "L0.?[1686853019000000000,1686853570351351340] 1686933528.17s 6mb|L0.?| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686933528.17s 10mb |L0.?| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686933528.17s 10mb |L0.?| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686933528.17s 10mb |L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686933528.17s 10mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686933528.17s 10mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686933528.17s 10mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686933528.17s 10mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686933528.17s 10mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686933528.17s 10mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686933528.17s 10mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686933528.17s 10mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686933528.17s 10mb |L0.?| "
- - "L0.?[1686864020081081061,1686864599000000000] 1686933528.17s 7mb |L0.?|"
- - "**** Simulation run 32, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853570351351340, 1686854441162162150, 1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060]). 1 Input Files, 80mb total:"
- - "L0, all files 80mb "
- - "L0.38[1686853079000000000,1686864659000000000] 1686930563.07s|-----------------------------------------L0.38------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 80mb total:"
- - "L0 "
- - "L0.?[1686853079000000000,1686853570351351340] 1686930563.07s 3mb|L0.?| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686930563.07s 6mb |L0.?| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686930563.07s 6mb |L0.?| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686930563.07s 6mb |L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686930563.07s 6mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686930563.07s 6mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686930563.07s 6mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686930563.07s 6mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686930563.07s 6mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686930563.07s 6mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686930563.07s 6mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686930563.07s 6mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686930563.07s 6mb |L0.?| "
- - "L0.?[1686864020081081061,1686864659000000000] 1686930563.07s 4mb |L0.?|"
- - "**** Simulation run 33, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853570351351340, 1686854441162162150, 1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060]). 1 Input Files, 80mb total:"
- - "L0, all files 80mb "
- - "L0.37[1686853079000000000,1686864659000000000] 1686933271.57s|-----------------------------------------L0.37------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 80mb total:"
- - "L0 "
- - "L0.?[1686853079000000000,1686853570351351340] 1686933271.57s 3mb|L0.?| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686933271.57s 6mb |L0.?| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686933271.57s 6mb |L0.?| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686933271.57s 6mb |L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686933271.57s 6mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686933271.57s 6mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686933271.57s 6mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686933271.57s 6mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686933271.57s 6mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686933271.57s 6mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686933271.57s 6mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686933271.57s 6mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686933271.57s 6mb |L0.?| "
- - "L0.?[1686864020081081061,1686864659000000000] 1686933271.57s 4mb |L0.?|"
- - "**** Simulation run 34, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853570351351340, 1686854441162162150, 1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060]). 1 Input Files, 79mb total:"
- - "L0, all files 79mb "
- - "L0.39[1686853139000000000,1686864839000000000] 1686931336.08s|-----------------------------------------L0.39------------------------------------------|"
- - "**** 14 Output Files (parquet_file_id not yet assigned), 79mb total:"
- - "L0 "
- - "L0.?[1686853139000000000,1686853570351351340] 1686931336.08s 3mb|L0.?| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686931336.08s 6mb |L0.?| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686931336.08s 6mb |L0.?| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686931336.08s 6mb |L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686931336.08s 6mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686931336.08s 6mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686931336.08s 6mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686931336.08s 6mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686931336.08s 6mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686931336.08s 6mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686931336.08s 6mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686931336.08s 6mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686931336.08s 6mb |L0.?| "
- - "L0.?[1686864020081081061,1686864839000000000] 1686931336.08s 6mb |L0.?| "
- - "**** Simulation run 35, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853570351351340, 1686854441162162150, 1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060, 1686864890891891870]). 1 Input Files, 77mb total:"
- - "L0, all files 77mb "
- - "L0.40[1686853379000000000,1686865259000000000] 1686929421.02s|-----------------------------------------L0.40------------------------------------------|"
- - "**** 15 Output Files (parquet_file_id not yet assigned), 77mb total:"
- - "L0 "
- - "L0.?[1686853379000000000,1686853570351351340] 1686929421.02s 1mb|L0.?| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686929421.02s 6mb |L0.?| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686929421.02s 6mb |L0.?| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686929421.02s 6mb |L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686929421.02s 6mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686929421.02s 6mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686929421.02s 6mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686929421.02s 6mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686929421.02s 6mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686929421.02s 6mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686929421.02s 6mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686929421.02s 6mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686929421.02s 6mb |L0.?| "
- - "L0.?[1686864020081081061,1686864890891891870] 1686929421.02s 6mb |L0.?| "
- - "L0.?[1686864890891891871,1686865259000000000] 1686929421.02s 2mb |L0.?|"
- - "**** Simulation run 36, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853570351351340, 1686854441162162150, 1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060, 1686864890891891870]). 1 Input Files, 77mb total:"
- - "L0, all files 77mb "
- - "L0.41[1686853379000000000,1686865259000000000] 1686935546.05s|-----------------------------------------L0.41------------------------------------------|"
- - "**** 15 Output Files (parquet_file_id not yet assigned), 77mb total:"
- - "L0 "
- - "L0.?[1686853379000000000,1686853570351351340] 1686935546.05s 1mb|L0.?| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686935546.05s 6mb |L0.?| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686935546.05s 6mb |L0.?| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686935546.05s 6mb |L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686935546.05s 6mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686935546.05s 6mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686935546.05s 6mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686935546.05s 6mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686935546.05s 6mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686935546.05s 6mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686935546.05s 6mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686935546.05s 6mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686935546.05s 6mb |L0.?| "
- - "L0.?[1686864020081081061,1686864890891891870] 1686935546.05s 6mb |L0.?| "
- - "L0.?[1686864890891891871,1686865259000000000] 1686935546.05s 2mb |L0.?|"
- - "**** Simulation run 37, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853570351351340, 1686854441162162150, 1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060, 1686864890891891870]). 1 Input Files, 124mb total:"
- - "L0, all files 124mb "
- - "L0.42[1686853439000000000,1686865439000000000] 1686930780.95s|-----------------------------------------L0.42------------------------------------------|"
- - "**** 15 Output Files (parquet_file_id not yet assigned), 124mb total:"
- - "L0 "
- - "L0.?[1686853439000000000,1686853570351351340] 1686930780.95s 1mb|L0.?| "
- - "L0.?[1686853570351351341,1686854441162162150] 1686930780.95s 9mb|L0.?| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686930780.95s 9mb |L0.?| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686930780.95s 9mb |L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686930780.95s 9mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686930780.95s 9mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686930780.95s 9mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686930780.95s 9mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686930780.95s 9mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686930780.95s 9mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686930780.95s 9mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686930780.95s 9mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686930780.95s 9mb |L0.?| "
- - "L0.?[1686864020081081061,1686864890891891870] 1686930780.95s 9mb |L0.?| "
- - "L0.?[1686864890891891871,1686865439000000000] 1686930780.95s 6mb |L0.?|"
- - "**** Simulation run 38, type=split(HighL0OverlapTotalBacklog)(split_times=[1686854441162162150, 1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060, 1686864890891891870, 1686865761702702680]). 1 Input Files, 73mb total:"
- - "L0, all files 73mb "
- - "L0.43[1686853739000000000,1686866039000000000] 1686934759.75s|-----------------------------------------L0.43------------------------------------------|"
- - "**** 15 Output Files (parquet_file_id not yet assigned), 73mb total:"
- - "L0 "
- - "L0.?[1686853739000000000,1686854441162162150] 1686934759.75s 4mb|L0.?| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686934759.75s 5mb |L0.?| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686934759.75s 5mb |L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686934759.75s 5mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686934759.75s 5mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686934759.75s 5mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686934759.75s 5mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686934759.75s 5mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686934759.75s 5mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686934759.75s 5mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686934759.75s 5mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686934759.75s 5mb |L0.?| "
- - "L0.?[1686864020081081061,1686864890891891870] 1686934759.75s 5mb |L0.?| "
- - "L0.?[1686864890891891871,1686865761702702680] 1686934759.75s 5mb |L0.?| "
- - "L0.?[1686865761702702681,1686866039000000000] 1686934759.75s 2mb |L0.?|"
- - "**** Simulation run 39, type=split(HighL0OverlapTotalBacklog)(split_times=[1686854441162162150, 1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060, 1686864890891891870, 1686865761702702680, 1686866632513513490]). 1 Input Files, 77mb total:"
- - "L0, all files 77mb "
- - "L0.44[1686854279000000000,1686867059000000000] 1686932677.39s|-----------------------------------------L0.44------------------------------------------|"
- - "**** 16 Output Files (parquet_file_id not yet assigned), 77mb total:"
- - "L0 "
- - "L0.?[1686854279000000000,1686854441162162150] 1686932677.39s 1006kb|L0.?| "
- - "L0.?[1686854441162162151,1686855311972972960] 1686932677.39s 5mb |L0.?| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686932677.39s 5mb |L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686932677.39s 5mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686932677.39s 5mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686932677.39s 5mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686932677.39s 5mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686932677.39s 5mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686932677.39s 5mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686932677.39s 5mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686932677.39s 5mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686932677.39s 5mb |L0.?| "
- - "L0.?[1686864020081081061,1686864890891891870] 1686932677.39s 5mb |L0.?| "
- - "L0.?[1686864890891891871,1686865761702702680] 1686932677.39s 5mb |L0.?| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686932677.39s 5mb |L0.?| "
- - "L0.?[1686866632513513491,1686867059000000000] 1686932677.39s 3mb |L0.?|"
- - "**** Simulation run 40, type=split(HighL0OverlapTotalBacklog)(split_times=[1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060, 1686864890891891870, 1686865761702702680, 1686866632513513490, 1686867503324324300]). 1 Input Files, 89mb total:"
- - "L0, all files 89mb "
- - "L0.45[1686854819000000000,1686868199000000000] 1686935947.46s|-----------------------------------------L0.45------------------------------------------|"
- - "**** 16 Output Files (parquet_file_id not yet assigned), 89mb total:"
- - "L0 "
- - "L0.?[1686854819000000000,1686855311972972960] 1686935947.46s 3mb|L0.?| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686935947.46s 6mb |L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686935947.46s 6mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686935947.46s 6mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686935947.46s 6mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686935947.46s 6mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686935947.46s 6mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686935947.46s 6mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686935947.46s 6mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686935947.46s 6mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686935947.46s 6mb |L0.?| "
- - "L0.?[1686864020081081061,1686864890891891870] 1686935947.46s 6mb |L0.?| "
- - "L0.?[1686864890891891871,1686865761702702680] 1686935947.46s 6mb |L0.?| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686935947.46s 6mb |L0.?| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686935947.46s 6mb |L0.?| "
- - "L0.?[1686867503324324301,1686868199000000000] 1686935947.46s 5mb |L0.?|"
- - "**** Simulation run 41, type=split(HighL0OverlapTotalBacklog)(split_times=[1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200]). 1 Input Files, 5mb total:"
- - "L1, all files 5mb "
- - "L1.47[1686854879000000000,1686859019000000000] 1686928854.57s|-----------------------------------------L1.47------------------------------------------|"
- - "**** 6 Output Files (parquet_file_id not yet assigned), 5mb total:"
- - "L1 "
- - "L1.?[1686854879000000000,1686855311972972960] 1686928854.57s 556kb|-L1.?--| "
- - "L1.?[1686855311972972961,1686856182783783770] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686856182783783771,1686857053594594580] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686857053594594581,1686857924405405390] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686857924405405391,1686858795216216200] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686858795216216201,1686859019000000000] 1686928854.57s 287kb |L1.?|"
- - "**** Simulation run 42, type=split(HighL0OverlapTotalBacklog)(split_times=[1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060, 1686864890891891870, 1686865761702702680, 1686866632513513490, 1686867503324324300, 1686868374135135110]). 1 Input Files, 93mb total:"
- - "L0, all files 93mb "
- - "L0.48[1686855119000000000,1686868739000000000] 1686929965.33s|-----------------------------------------L0.48------------------------------------------|"
- - "**** 17 Output Files (parquet_file_id not yet assigned), 93mb total:"
- - "L0 "
- - "L0.?[1686855119000000000,1686855311972972960] 1686929965.33s 1mb|L0.?| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686929965.33s 6mb |L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686929965.33s 6mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686929965.33s 6mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686929965.33s 6mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686929965.33s 6mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686929965.33s 6mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686929965.33s 6mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686929965.33s 6mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686929965.33s 6mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686929965.33s 6mb |L0.?| "
- - "L0.?[1686864020081081061,1686864890891891870] 1686929965.33s 6mb |L0.?| "
- - "L0.?[1686864890891891871,1686865761702702680] 1686929965.33s 6mb |L0.?| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686929965.33s 6mb |L0.?| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686929965.33s 6mb |L0.?| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686929965.33s 6mb |L0.?| "
- - "L0.?[1686868374135135111,1686868739000000000] 1686929965.33s 2mb |L0.?|"
- - "**** Simulation run 43, type=split(HighL0OverlapTotalBacklog)(split_times=[1686855311972972960, 1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060, 1686864890891891870, 1686865761702702680, 1686866632513513490, 1686867503324324300, 1686868374135135110]). 1 Input Files, 92mb total:"
- - "L0, all files 92mb "
- - "L0.49[1686855179000000000,1686868859000000000] 1686933830.06s|-----------------------------------------L0.49------------------------------------------|"
- - "**** 17 Output Files (parquet_file_id not yet assigned), 92mb total:"
- - "L0 "
- - "L0.?[1686855179000000000,1686855311972972960] 1686933830.06s 920kb|L0.?| "
- - "L0.?[1686855311972972961,1686856182783783770] 1686933830.06s 6mb|L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686933830.06s 6mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686933830.06s 6mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686933830.06s 6mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686933830.06s 6mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686933830.06s 6mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686933830.06s 6mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686933830.06s 6mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686933830.06s 6mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686933830.06s 6mb |L0.?| "
- - "L0.?[1686864020081081061,1686864890891891870] 1686933830.06s 6mb |L0.?| "
- - "L0.?[1686864890891891871,1686865761702702680] 1686933830.06s 6mb |L0.?| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686933830.06s 6mb |L0.?| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686933830.06s 6mb |L0.?| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686933830.06s 6mb |L0.?| "
- - "L0.?[1686868374135135111,1686868859000000000] 1686933830.06s 3mb |L0.?|"
- - "**** Simulation run 44, type=split(HighL0OverlapTotalBacklog)(split_times=[1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060, 1686864890891891870, 1686865761702702680, 1686866632513513490, 1686867503324324300, 1686868374135135110, 1686869244945945920]). 1 Input Files, 97mb total:"
- - "L0, all files 97mb "
- - "L0.50[1686855479000000000,1686869519000000000] 1686935151.54s|-----------------------------------------L0.50------------------------------------------|"
- - "**** 17 Output Files (parquet_file_id not yet assigned), 97mb total:"
- - "L0 "
- - "L0.?[1686855479000000000,1686856182783783770] 1686935151.54s 5mb|L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686935151.54s 6mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686935151.54s 6mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686935151.54s 6mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686935151.54s 6mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686935151.54s 6mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686935151.54s 6mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686935151.54s 6mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686935151.54s 6mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686935151.54s 6mb |L0.?| "
- - "L0.?[1686864020081081061,1686864890891891870] 1686935151.54s 6mb |L0.?| "
- - "L0.?[1686864890891891871,1686865761702702680] 1686935151.54s 6mb |L0.?| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686935151.54s 6mb |L0.?| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686935151.54s 6mb |L0.?| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686935151.54s 6mb |L0.?| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686935151.54s 6mb |L0.?| "
- - "L0.?[1686869244945945921,1686869519000000000] 1686935151.54s 2mb |L0.?|"
- - "**** Simulation run 45, type=split(HighL0OverlapTotalBacklog)(split_times=[1686856182783783770, 1686857053594594580, 1686857924405405390, 1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060, 1686864890891891870, 1686865761702702680, 1686866632513513490, 1686867503324324300, 1686868374135135110, 1686869244945945920]). 1 Input Files, 95mb total:"
- - "L0, all files 95mb "
- - "L0.51[1686855719000000000,1686869939000000000] 1686931893.7s|-----------------------------------------L0.51------------------------------------------|"
- - "**** 17 Output Files (parquet_file_id not yet assigned), 95mb total:"
- - "L0 "
- - "L0.?[1686855719000000000,1686856182783783770] 1686931893.7s 3mb|L0.?| "
- - "L0.?[1686856182783783771,1686857053594594580] 1686931893.7s 6mb |L0.?| "
- - "L0.?[1686857053594594581,1686857924405405390] 1686931893.7s 6mb |L0.?| "
- - "L0.?[1686857924405405391,1686858795216216200] 1686931893.7s 6mb |L0.?| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686931893.7s 6mb |L0.?| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686931893.7s 6mb |L0.?| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686931893.7s 6mb |L0.?| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686931893.7s 6mb |L0.?| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686931893.7s 6mb |L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686931893.7s 6mb |L0.?| "
- - "L0.?[1686864020081081061,1686864890891891870] 1686931893.7s 6mb |L0.?| "
- - "L0.?[1686864890891891871,1686865761702702680] 1686931893.7s 6mb |L0.?| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686931893.7s 6mb |L0.?| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686931893.7s 6mb |L0.?| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686931893.7s 6mb |L0.?| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686931893.7s 6mb |L0.?| "
- - "L0.?[1686869244945945921,1686869939000000000] 1686931893.7s 5mb |L0.?|"
- - "**** Simulation run 46, type=split(HighL0OverlapTotalBacklog)(split_times=[1686858795216216200, 1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250, 1686864020081081060, 1686864890891891870, 1686865761702702680]). 1 Input Files, 94mb total:"
- - "L0, all files 94mb "
- - "L0.52[1686858179000000000,1686865979000000000] 1686932458.05s|-----------------------------------------L0.52------------------------------------------|"
- - "**** 10 Output Files (parquet_file_id not yet assigned), 94mb total:"
- - "L0 "
- - "L0.?[1686858179000000000,1686858795216216200] 1686932458.05s 7mb|L0.?-| "
- - "L0.?[1686858795216216201,1686859666027027010] 1686932458.05s 10mb |--L0.?--| "
- - "L0.?[1686859666027027011,1686860536837837820] 1686932458.05s 10mb |--L0.?--| "
- - "L0.?[1686860536837837821,1686861407648648630] 1686932458.05s 10mb |--L0.?--| "
- - "L0.?[1686861407648648631,1686862278459459440] 1686932458.05s 10mb |--L0.?--| "
- - "L0.?[1686862278459459441,1686863149270270250] 1686932458.05s 10mb |--L0.?--| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686932458.05s 10mb |--L0.?--| "
- - "L0.?[1686864020081081061,1686864890891891870] 1686932458.05s 10mb |--L0.?--| "
- - "L0.?[1686864890891891871,1686865761702702680] 1686932458.05s 10mb |--L0.?--| "
- - "L0.?[1686865761702702681,1686865979000000000] 1686932458.05s 3mb |L0.?|"
- - "**** Simulation run 47, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859666027027010, 1686860536837837820, 1686861407648648630, 1686862278459459440, 1686863149270270250]). 1 Input Files, 5mb total:"
- - "L1, all files 5mb "
- - "L1.55[1686859559000000000,1686863699000000000] 1686928854.57s|-----------------------------------------L1.55------------------------------------------|"
- - "**** 6 Output Files (parquet_file_id not yet assigned), 5mb total:"
- - "L1 "
- - "L1.?[1686859559000000000,1686859666027027010] 1686928854.57s 137kb|L1.?| "
- - "L1.?[1686859666027027011,1686860536837837820] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686860536837837821,1686861407648648630] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686861407648648631,1686862278459459440] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686862278459459441,1686863149270270250] 1686928854.57s 1mb |------L1.?------| "
- - "L1.?[1686863149270270251,1686863699000000000] 1686928854.57s 703kb |--L1.?---| "
- - "**** Simulation run 48, type=split(HighL0OverlapTotalBacklog)(split_times=[1686863149270270250, 1686864020081081060, 1686864890891891870, 1686865761702702680, 1686866632513513490, 1686867503324324300, 1686868374135135110, 1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 98mb total:"
- - "L0, all files 98mb "
- - "L0.57[1686862919000000000,1686873599000000000] 1686934254.96s|-----------------------------------------L0.57------------------------------------------|"
- - "**** 13 Output Files (parquet_file_id not yet assigned), 98mb total:"
- - "L0 "
- - "L0.?[1686862919000000000,1686863149270270250] 1686934254.96s 2mb|L0.?| "
- - "L0.?[1686863149270270251,1686864020081081060] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686864020081081061,1686864890891891870] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686864890891891871,1686865761702702680] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686934254.96s 8mb |L0.?-| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686934254.96s 8mb |L0.?-| "
- - "**** Simulation run 49, type=split(HighL0OverlapTotalBacklog)(split_times=[1686864020081081060, 1686864890891891870, 1686865761702702680, 1686866632513513490, 1686867503324324300]). 1 Input Files, 5mb total:"
- - "L1, all files 5mb "
- - "L1.58[1686863759000000000,1686867659000000000] 1686928854.57s|-----------------------------------------L1.58------------------------------------------|"
- - "**** 6 Output Files (parquet_file_id not yet assigned), 5mb total:"
- - "L1 "
- - "L1.?[1686863759000000000,1686864020081081060] 1686928854.57s 329kb|L1.?| "
- - "L1.?[1686864020081081061,1686864890891891870] 1686928854.57s 1mb |-------L1.?-------| "
- - "L1.?[1686864890891891871,1686865761702702680] 1686928854.57s 1mb |-------L1.?-------| "
- - "L1.?[1686865761702702681,1686866632513513490] 1686928854.57s 1mb |-------L1.?-------| "
- - "L1.?[1686866632513513491,1686867503324324300] 1686928854.57s 1mb |-------L1.?-------| "
- - "L1.?[1686867503324324301,1686867659000000000] 1686928854.57s 196kb |L1.?|"
- - "**** Simulation run 50, type=split(HighL0OverlapTotalBacklog)(split_times=[1686864890891891870, 1686865761702702680, 1686866632513513490, 1686867503324324300, 1686868374135135110, 1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 75mb total:"
- - "L0, all files 75mb "
- - "L0.60[1686864419000000000,1686873599000000000] 1686929712.33s|-----------------------------------------L0.60------------------------------------------|"
- - "**** 11 Output Files (parquet_file_id not yet assigned), 75mb total:"
- - "L0 "
- - "L0.?[1686864419000000000,1686864890891891870] 1686929712.33s 4mb|L0.?| "
- - "L0.?[1686864890891891871,1686865761702702680] 1686929712.33s 7mb |-L0.?-| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686929712.33s 7mb |-L0.?-| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686929712.33s 7mb |-L0.?-| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686929712.33s 7mb |-L0.?-| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686929712.33s 7mb |-L0.?-| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686929712.33s 7mb |-L0.?-| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686929712.33s 7mb |-L0.?-| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686929712.33s 7mb |-L0.?-| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686929712.33s 7mb |-L0.?-| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686929712.33s 7mb |-L0.?-| "
- - "**** Simulation run 51, type=split(HighL0OverlapTotalBacklog)(split_times=[1686864890891891870, 1686865761702702680, 1686866632513513490, 1686867503324324300, 1686868374135135110, 1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 75mb total:"
- - "L0, all files 75mb "
- - "L0.61[1686864419000000000,1686873599000000000] 1686935742.51s|-----------------------------------------L0.61------------------------------------------|"
- - "**** 11 Output Files (parquet_file_id not yet assigned), 75mb total:"
- - "L0 "
- - "L0.?[1686864419000000000,1686864890891891870] 1686935742.51s 4mb|L0.?| "
- - "L0.?[1686864890891891871,1686865761702702680] 1686935742.51s 7mb |-L0.?-| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686935742.51s 7mb |-L0.?-| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686935742.51s 7mb |-L0.?-| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686935742.51s 7mb |-L0.?-| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686935742.51s 7mb |-L0.?-| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686935742.51s 7mb |-L0.?-| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686935742.51s 7mb |-L0.?-| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686935742.51s 7mb |-L0.?-| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686935742.51s 7mb |-L0.?-| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686935742.51s 7mb |-L0.?-| "
- - "**** Simulation run 52, type=split(HighL0OverlapTotalBacklog)(split_times=[1686864890891891870, 1686865761702702680, 1686866632513513490, 1686867503324324300, 1686868374135135110, 1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 79mb total:"
- - "L0, all files 79mb "
- - "L0.63[1686864479000000000,1686873599000000000] 1686931600.58s|-----------------------------------------L0.63------------------------------------------|"
- - "**** 11 Output Files (parquet_file_id not yet assigned), 79mb total:"
- - "L0 "
- - "L0.?[1686864479000000000,1686864890891891870] 1686931600.58s 4mb|L0.?| "
- - "L0.?[1686864890891891871,1686865761702702680] 1686931600.58s 8mb |-L0.?-| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686931600.58s 8mb |-L0.?-| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686931600.58s 8mb |-L0.?-| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686931600.58s 8mb |-L0.?-| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686931600.58s 8mb |-L0.?-| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686931600.58s 8mb |-L0.?-| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686931600.58s 8mb |-L0.?-| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686931600.58s 8mb |-L0.?-| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686931600.58s 8mb |-L0.?-| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686931600.58s 8mb |-L0.?-| "
- - "**** Simulation run 53, type=split(HighL0OverlapTotalBacklog)(split_times=[1686864890891891870, 1686865761702702680, 1686866632513513490, 1686867503324324300, 1686868374135135110, 1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 74mb total:"
- - "L0, all files 74mb "
- - "L0.62[1686864479000000000,1686873599000000000] 1686934966.48s|-----------------------------------------L0.62------------------------------------------|"
- - "**** 11 Output Files (parquet_file_id not yet assigned), 74mb total:"
- - "L0 "
- - "L0.?[1686864479000000000,1686864890891891870] 1686934966.48s 3mb|L0.?| "
- - "L0.?[1686864890891891871,1686865761702702680] 1686934966.48s 7mb |-L0.?-| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686934966.48s 7mb |-L0.?-| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686934966.48s 7mb |-L0.?-| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686934966.48s 7mb |-L0.?-| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686934966.48s 7mb |-L0.?-| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686934966.48s 7mb |-L0.?-| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686934966.48s 7mb |-L0.?-| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686934966.48s 7mb |-L0.?-| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686934966.48s 7mb |-L0.?-| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686934966.48s 7mb |-L0.?-| "
- - "**** Simulation run 54, type=split(HighL0OverlapTotalBacklog)(split_times=[1686864890891891870, 1686865761702702680, 1686866632513513490, 1686867503324324300, 1686868374135135110, 1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 73mb total:"
- - "L0, all files 73mb "
- - "L0.64[1686864659000000000,1686873599000000000] 1686933528.17s|-----------------------------------------L0.64------------------------------------------|"
- - "**** 11 Output Files (parquet_file_id not yet assigned), 73mb total:"
- - "L0 "
- - "L0.?[1686864659000000000,1686864890891891870] 1686933528.17s 2mb|L0.?| "
- - "L0.?[1686864890891891871,1686865761702702680] 1686933528.17s 7mb |-L0.?-| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686933528.17s 7mb |-L0.?-| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686933528.17s 7mb |-L0.?-| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686933528.17s 7mb |-L0.?-| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686933528.17s 7mb |-L0.?-| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686933528.17s 7mb |-L0.?-| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686933528.17s 7mb |-L0.?-| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686933528.17s 7mb |-L0.?-| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686933528.17s 7mb |-L0.?-| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686933528.17s 7mb |-L0.?-| "
- - "**** Simulation run 55, type=split(HighL0OverlapTotalBacklog)(split_times=[1686864890891891870, 1686865761702702680, 1686866632513513490, 1686867503324324300, 1686868374135135110, 1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 54mb total:"
- - "L0, all files 54mb "
- - "L0.65[1686864719000000000,1686873599000000000] 1686930563.07s|-----------------------------------------L0.65------------------------------------------|"
- - "**** 11 Output Files (parquet_file_id not yet assigned), 54mb total:"
- - "L0 "
- - "L0.?[1686864719000000000,1686864890891891870] 1686930563.07s 1mb|L0.?| "
- - "L0.?[1686864890891891871,1686865761702702680] 1686930563.07s 5mb |-L0.?-| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686930563.07s 5mb |-L0.?-| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686930563.07s 5mb |-L0.?-| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686930563.07s 5mb |-L0.?-| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686930563.07s 5mb |-L0.?-| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686930563.07s 5mb |-L0.?-| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686930563.07s 5mb |-L0.?-| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686930563.07s 5mb |-L0.?-| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686930563.07s 5mb |-L0.?-| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686930563.07s 5mb |-L0.?-| "
- - "**** Simulation run 56, type=split(HighL0OverlapTotalBacklog)(split_times=[1686864890891891870, 1686865761702702680, 1686866632513513490, 1686867503324324300, 1686868374135135110, 1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 54mb total:"
- - "L0, all files 54mb "
- - "L0.66[1686864719000000000,1686873599000000000] 1686933271.57s|-----------------------------------------L0.66------------------------------------------|"
- - "**** 11 Output Files (parquet_file_id not yet assigned), 54mb total:"
- - "L0 "
- - "L0.?[1686864719000000000,1686864890891891870] 1686933271.57s 1mb|L0.?| "
- - "L0.?[1686864890891891871,1686865761702702680] 1686933271.57s 5mb |-L0.?-| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686933271.57s 5mb |-L0.?-| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686933271.57s 5mb |-L0.?-| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686933271.57s 5mb |-L0.?-| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686933271.57s 5mb |-L0.?-| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686933271.57s 5mb |-L0.?-| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686933271.57s 5mb |-L0.?-| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686933271.57s 5mb |-L0.?-| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686933271.57s 5mb |-L0.?-| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686933271.57s 5mb |-L0.?-| "
- - "**** Simulation run 57, type=split(HighL0OverlapTotalBacklog)(split_times=[1686865761702702680, 1686866632513513490, 1686867503324324300, 1686868374135135110, 1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 53mb total:"
- - "L0, all files 53mb "
- - "L0.67[1686864899000000000,1686873599000000000] 1686931336.08s|-----------------------------------------L0.67------------------------------------------|"
- - "**** 10 Output Files (parquet_file_id not yet assigned), 53mb total:"
- - "L0 "
- - "L0.?[1686864899000000000,1686865761702702680] 1686931336.08s 5mb|-L0.?-| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686931336.08s 5mb |-L0.?--| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686931336.08s 5mb |-L0.?--| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686931336.08s 5mb |-L0.?--| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686931336.08s 5mb |-L0.?--| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686931336.08s 5mb |-L0.?--| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686931336.08s 5mb |-L0.?--| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686931336.08s 5mb |-L0.?--| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686931336.08s 5mb |-L0.?--| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686931336.08s 5mb |-L0.?--| "
- - "**** Simulation run 58, type=split(HighL0OverlapTotalBacklog)(split_times=[1686865761702702680, 1686866632513513490, 1686867503324324300, 1686868374135135110, 1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 49mb total:"
- - "L0, all files 49mb "
- - "L0.68[1686865319000000000,1686873599000000000] 1686929421.02s|-----------------------------------------L0.68------------------------------------------|"
- - "**** 10 Output Files (parquet_file_id not yet assigned), 49mb total:"
- - "L0 "
- - "L0.?[1686865319000000000,1686865761702702680] 1686929421.02s 3mb|L0.?| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686929421.02s 5mb |-L0.?--| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686929421.02s 5mb |-L0.?--| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686929421.02s 5mb |-L0.?--| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686929421.02s 5mb |-L0.?--| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686929421.02s 5mb |-L0.?--| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686929421.02s 5mb |-L0.?--| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686929421.02s 5mb |-L0.?--| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686929421.02s 5mb |-L0.?--| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686929421.02s 5mb |-L0.?--| "
- - "**** Simulation run 59, type=split(HighL0OverlapTotalBacklog)(split_times=[1686865761702702680, 1686866632513513490, 1686867503324324300, 1686868374135135110, 1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 49mb total:"
- - "L0, all files 49mb "
- - "L0.69[1686865319000000000,1686873599000000000] 1686935546.05s|-----------------------------------------L0.69------------------------------------------|"
- - "**** 10 Output Files (parquet_file_id not yet assigned), 49mb total:"
- - "L0 "
- - "L0.?[1686865319000000000,1686865761702702680] 1686935546.05s 3mb|L0.?| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686935546.05s 5mb |-L0.?--| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686935546.05s 5mb |-L0.?--| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686935546.05s 5mb |-L0.?--| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686935546.05s 5mb |-L0.?--| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686935546.05s 5mb |-L0.?--| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686935546.05s 5mb |-L0.?--| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686935546.05s 5mb |-L0.?--| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686935546.05s 5mb |-L0.?--| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686935546.05s 5mb |-L0.?--| "
- - "**** Simulation run 60, type=split(HighL0OverlapTotalBacklog)(split_times=[1686865761702702680, 1686866632513513490, 1686867503324324300, 1686868374135135110, 1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 88mb total:"
- - "L0, all files 88mb "
- - "L0.70[1686865499000000000,1686873599000000000] 1686930780.95s|-----------------------------------------L0.70------------------------------------------|"
- - "**** 10 Output Files (parquet_file_id not yet assigned), 88mb total:"
- - "L0 "
- - "L0.?[1686865499000000000,1686865761702702680] 1686930780.95s 3mb|L0.?| "
- - "L0.?[1686865761702702681,1686866632513513490] 1686930780.95s 9mb |-L0.?--| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686930780.95s 9mb |-L0.?--| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686930780.95s 9mb |-L0.?--| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686930780.95s 9mb |-L0.?--| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686930780.95s 9mb |-L0.?--| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686930780.95s 9mb |-L0.?--| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686930780.95s 9mb |-L0.?--| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686930780.95s 9mb |-L0.?--| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686930780.95s 9mb |-L0.?--| "
- - "**** Simulation run 61, type=split(HighL0OverlapTotalBacklog)(split_times=[1686866632513513490, 1686867503324324300, 1686868374135135110, 1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 45mb total:"
- - "L0, all files 45mb "
- - "L0.71[1686866099000000000,1686873599000000000] 1686934759.75s|-----------------------------------------L0.71------------------------------------------|"
- - "**** 9 Output Files (parquet_file_id not yet assigned), 45mb total:"
- - "L0 "
- - "L0.?[1686866099000000000,1686866632513513490] 1686934759.75s 3mb|L0.?| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686934759.75s 5mb |--L0.?--| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686934759.75s 5mb |--L0.?--| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686934759.75s 5mb |--L0.?--| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686934759.75s 5mb |--L0.?--| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686934759.75s 5mb |--L0.?--| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686934759.75s 5mb |--L0.?--| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686934759.75s 5mb |--L0.?--| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686934759.75s 5mb |--L0.?--| "
- - "**** Simulation run 62, type=split(HighL0OverlapTotalBacklog)(split_times=[1686867503324324300, 1686868374135135110, 1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 109mb total:"
- - "L0, all files 109mb "
- - "L0.72[1686867119000000000,1686873599000000000] 1686932677.39s|-----------------------------------------L0.72------------------------------------------|"
- - "**** 8 Output Files (parquet_file_id not yet assigned), 109mb total:"
- - "L0 "
- - "L0.?[1686867119000000000,1686867503324324300] 1686932677.39s 6mb|L0.?| "
- - "L0.?[1686867503324324301,1686868374135135110] 1686932677.39s 15mb |---L0.?---| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686932677.39s 15mb |---L0.?---| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686932677.39s 15mb |---L0.?---| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686932677.39s 15mb |---L0.?---| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686932677.39s 15mb |---L0.?---| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686932677.39s 15mb |---L0.?---| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686932677.39s 15mb |---L0.?---| "
- - "**** Simulation run 63, type=split(HighL0OverlapTotalBacklog)(split_times=[1686868374135135110, 1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 67mb total:"
- - "L0, all files 67mb "
- - "L0.76[1686868259000000000,1686873599000000000] 1686935947.46s|-----------------------------------------L0.76------------------------------------------|"
- - "**** 7 Output Files (parquet_file_id not yet assigned), 67mb total:"
- - "L0 "
- - "L0.?[1686868259000000000,1686868374135135110] 1686935947.46s 1mb|L0.?| "
- - "L0.?[1686868374135135111,1686869244945945920] 1686935947.46s 11mb |----L0.?----| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686935947.46s 11mb |----L0.?----| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686935947.46s 11mb |----L0.?----| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686935947.46s 11mb |----L0.?----| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686935947.46s 11mb |----L0.?----| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686935947.46s 11mb |----L0.?----| "
- - "**** Simulation run 64, type=split(HighL0OverlapTotalBacklog)(split_times=[1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 89mb total:"
- - "L1, all files 89mb "
- - "L1.77[1686868379000000000,1686873599000000000] 1686928854.57s|-----------------------------------------L1.77------------------------------------------|"
- - "**** 6 Output Files (parquet_file_id not yet assigned), 89mb total:"
- - "L1 "
- - "L1.?[1686868379000000000,1686869244945945920] 1686928854.57s 15mb|----L1.?----| "
- - "L1.?[1686869244945945921,1686870115756756730] 1686928854.57s 15mb |----L1.?-----| "
- - "L1.?[1686870115756756731,1686870986567567540] 1686928854.57s 15mb |----L1.?-----| "
- - "L1.?[1686870986567567541,1686871857378378350] 1686928854.57s 15mb |----L1.?-----| "
- - "L1.?[1686871857378378351,1686872728189189160] 1686928854.57s 15mb |----L1.?-----| "
- - "L1.?[1686872728189189161,1686873599000000000] 1686928854.57s 15mb |----L1.?-----| "
- - "**** Simulation run 65, type=split(HighL0OverlapTotalBacklog)(split_times=[1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 58mb total:"
- - "L0, all files 58mb "
- - "L0.79[1686868799000000000,1686873599000000000] 1686929965.33s|-----------------------------------------L0.79------------------------------------------|"
- - "**** 6 Output Files (parquet_file_id not yet assigned), 58mb total:"
- - "L0 "
- - "L0.?[1686868799000000000,1686869244945945920] 1686929965.33s 5mb|-L0.?-| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686929965.33s 11mb |-----L0.?-----| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686929965.33s 11mb |-----L0.?-----| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686929965.33s 11mb |-----L0.?-----| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686929965.33s 11mb |-----L0.?-----| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686929965.33s 11mb |-----L0.?-----| "
- - "**** Simulation run 66, type=split(HighL0OverlapTotalBacklog)(split_times=[1686869244945945920, 1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 57mb total:"
- - "L0, all files 57mb "
- - "L0.80[1686868919000000000,1686873599000000000] 1686933830.06s|-----------------------------------------L0.80------------------------------------------|"
- - "**** 6 Output Files (parquet_file_id not yet assigned), 57mb total:"
- - "L0 "
- - "L0.?[1686868919000000000,1686869244945945920] 1686933830.06s 4mb|L0.?| "
- - "L0.?[1686869244945945921,1686870115756756730] 1686933830.06s 11mb |-----L0.?-----| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686933830.06s 11mb |-----L0.?-----| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686933830.06s 11mb |-----L0.?-----| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686933830.06s 11mb |-----L0.?-----| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686933830.06s 11mb |-----L0.?-----| "
- - "**** Simulation run 67, type=split(HighL0OverlapTotalBacklog)(split_times=[1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 49mb total:"
- - "L0, all files 49mb "
- - "L0.81[1686869579000000000,1686873599000000000] 1686935151.54s|-----------------------------------------L0.81------------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 49mb total:"
- - "L0 "
- - "L0.?[1686869579000000000,1686870115756756730] 1686935151.54s 6mb|---L0.?---| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686935151.54s 11mb |------L0.?-------| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686935151.54s 11mb |------L0.?-------| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686935151.54s 11mb |------L0.?-------| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686935151.54s 11mb |------L0.?-------| "
- - "**** Simulation run 68, type=split(HighL0OverlapTotalBacklog)(split_times=[1686870115756756730, 1686870986567567540, 1686871857378378350, 1686872728189189160]). 1 Input Files, 44mb total:"
- - "L0, all files 44mb "
- - "L0.82[1686869999000000000,1686873599000000000] 1686931893.7s|-----------------------------------------L0.82------------------------------------------|"
- - "**** 5 Output Files (parquet_file_id not yet assigned), 44mb total:"
- - "L0 "
- - "L0.?[1686869999000000000,1686870115756756730] 1686931893.7s 1mb|L0.?| "
- - "L0.?[1686870115756756731,1686870986567567540] 1686931893.7s 11mb |-------L0.?--------| "
- - "L0.?[1686870986567567541,1686871857378378350] 1686931893.7s 11mb |-------L0.?--------| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686931893.7s 11mb |-------L0.?--------| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686931893.7s 11mb |-------L0.?--------| "
- - "**** Simulation run 69, type=split(HighL0OverlapTotalBacklog)(split_times=[1686871857378378350, 1686872728189189160]). 1 Input Files, 9mb total:"
- - "L0, all files 9mb "
- - "L0.83[1686871619000000000,1686873599000000000] 1686936871.55s|-----------------------------------------L0.83------------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 9mb total:"
- - "L0 "
- - "L0.?[1686871619000000000,1686871857378378350] 1686936871.55s 1mb|--L0.?--| "
- - "L0.?[1686871857378378351,1686872728189189160] 1686936871.55s 4mb |----------------L0.?-----------------| "
- - "L0.?[1686872728189189161,1686873599000000000] 1686936871.55s 4mb |----------------L0.?-----------------| "
- - "Committing partition 1:"
- - " Soft Deleting 70 files: L0.1, L0.3, L0.4, L0.5, L1.6, L0.7, L0.8, L0.9, L0.10, L0.11, L0.12, L0.13, L0.14, L0.15, L0.16, L0.17, L0.18, L0.19, L0.20, L0.21, L0.22, L0.23, L1.24, L0.26, L1.28, L1.29, L0.31, L0.32, L0.33, L0.34, L0.35, L0.36, L0.37, L0.38, L0.39, L0.40, L0.41, L0.42, L0.43, L0.44, L0.45, L1.47, L0.48, L0.49, L0.50, L0.51, L0.52, L1.55, L0.57, L1.58, L0.60, L0.61, L0.62, L0.63, L0.64, L0.65, L0.66, L0.67, L0.68, L0.69, L0.70, L0.71, L0.72, L0.76, L1.77, L0.79, L0.80, L0.81, L0.82, L0.83"
- - " Creating 852 files"
- - "**** Simulation run 70, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686842589641148927]). 20 Input Files, 144mb total:"
- - "L0 "
- - "L0.89[1686841379000000000,1686842249810810810] 1686929421.02s 10mb|-------------------L0.89-------------------| "
- - "L0.90[1686842249810810811,1686843120621621620] 1686929421.02s 10mb |------------------L0.90-------------------| "
- - "L0.103[1686841379000000000,1686842249810810810] 1686929712.33s 5mb|------------------L0.103-------------------| "
- - "L0.104[1686842249810810811,1686843120621621620] 1686929712.33s 5mb |------------------L0.104------------------| "
- - "L0.117[1686841379000000000,1686842249810810810] 1686929965.33s 5mb|------------------L0.117-------------------| "
- - "L0.118[1686842249810810811,1686843120621621620] 1686929965.33s 5mb |------------------L0.118------------------| "
- - "L0.133[1686841379000000000,1686842249810810810] 1686930563.07s 11mb|------------------L0.133-------------------| "
- - "L0.134[1686842249810810811,1686843120621621620] 1686930563.07s 11mb |------------------L0.134------------------| "
- - "L0.147[1686841379000000000,1686842249810810810] 1686930780.95s 4mb|------------------L0.147-------------------| "
- - "L0.148[1686842249810810811,1686843120621621620] 1686930780.95s 4mb |------------------L0.148------------------| "
- - "L0.161[1686841379000000000,1686842249810810810] 1686931336.08s 11mb|------------------L0.161-------------------| "
- - "L0.162[1686842249810810811,1686843120621621620] 1686931336.08s 11mb |------------------L0.162------------------| "
- - "L0.175[1686841379000000000,1686842249810810810] 1686931600.58s 5mb|------------------L0.175-------------------| "
- - "L0.176[1686842249810810811,1686843120621621620] 1686931600.58s 5mb |------------------L0.176------------------| "
- - "L0.189[1686841379000000000,1686842249810810810] 1686931893.7s 5mb|------------------L0.189-------------------| "
- - "L0.190[1686842249810810811,1686843120621621620] 1686931893.7s 5mb |------------------L0.190------------------| "
- - "L0.206[1686841379000000000,1686842249810810810] 1686932458.05s 10mb|------------------L0.206-------------------| "
- - "L0.207[1686842249810810811,1686843120621621620] 1686932458.05s 10mb |------------------L0.207------------------| "
- - "L0.216[1686841379000000000,1686842249810810810] 1686932677.39s 4mb|------------------L0.216-------------------| "
- - "L0.217[1686842249810810811,1686843120621621620] 1686932677.39s 4mb |------------------L0.217------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 144mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842589641148927] 1686932677.39s 100mb|----------------------------L0.?----------------------------| "
- - "L0.?[1686842589641148928,1686843120621621620] 1686932677.39s 44mb |----------L0.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.89, L0.90, L0.103, L0.104, L0.117, L0.118, L0.133, L0.134, L0.147, L0.148, L0.161, L0.162, L0.175, L0.176, L0.189, L0.190, L0.206, L0.207, L0.216, L0.217"
- - " Creating 2 files"
- - "**** Simulation run 71, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686842593136179151]). 20 Input Files, 143mb total:"
- - "L0 "
- - "L0.231[1686841379000000000,1686842249810810810] 1686933271.57s 11mb|------------------L0.231-------------------| "
- - "L0.232[1686842249810810811,1686843120621621620] 1686933271.57s 11mb |------------------L0.232------------------| "
- - "L0.245[1686841379000000000,1686842249810810810] 1686933528.17s 5mb|------------------L0.245-------------------| "
- - "L0.246[1686842249810810811,1686843120621621620] 1686933528.17s 5mb |------------------L0.246------------------| "
- - "L0.259[1686841379000000000,1686842249810810810] 1686933830.06s 5mb|------------------L0.259-------------------| "
- - "L0.260[1686842249810810811,1686843120621621620] 1686933830.06s 5mb |------------------L0.260------------------| "
- - "L0.275[1686841379000000000,1686842249810810810] 1686934254.96s 8mb|------------------L0.275-------------------| "
- - "L0.276[1686842249810810811,1686843120621621620] 1686934254.96s 8mb |------------------L0.276------------------| "
- - "L0.288[1686841379000000000,1686842249810810810] 1686934759.75s 10mb|------------------L0.288-------------------| "
- - "L0.289[1686842249810810811,1686843120621621620] 1686934759.75s 10mb |------------------L0.289------------------| "
- - "L0.303[1686841379000000000,1686842249810810810] 1686934966.48s 5mb|------------------L0.303-------------------| "
- - "L0.304[1686842249810810811,1686843120621621620] 1686934966.48s 5mb |------------------L0.304------------------| "
- - "L0.317[1686841379000000000,1686842249810810810] 1686935151.54s 5mb|------------------L0.317-------------------| "
- - "L0.318[1686842249810810811,1686843120621621620] 1686935151.54s 5mb |------------------L0.318------------------| "
- - "L0.334[1686841379000000000,1686842249810810810] 1686935546.05s 10mb|------------------L0.334-------------------| "
- - "L0.335[1686842249810810811,1686843120621621620] 1686935546.05s 10mb |------------------L0.335------------------| "
- - "L0.348[1686841379000000000,1686842249810810810] 1686935742.51s 5mb|------------------L0.348-------------------| "
- - "L0.349[1686842249810810811,1686843120621621620] 1686935742.51s 5mb |------------------L0.349------------------| "
- - "L0.362[1686841379000000000,1686842249810810810] 1686935947.46s 5mb|------------------L0.362-------------------| "
- - "L0.363[1686842249810810811,1686843120621621620] 1686935947.46s 5mb |------------------L0.363------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 143mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842593136179151] 1686935947.46s 100mb|----------------------------L0.?----------------------------| "
- - "L0.?[1686842593136179152,1686843120621621620] 1686935947.46s 43mb |----------L0.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.231, L0.232, L0.245, L0.246, L0.259, L0.260, L0.275, L0.276, L0.288, L0.289, L0.303, L0.304, L0.317, L0.318, L0.334, L0.335, L0.348, L0.349, L0.362, L0.363"
- - " Creating 2 files"
- - "**** Simulation run 72, type=compact(ManySmallFiles). 2 Input Files, 6mb total:"
- - "L0, all files 3mb "
- - "L0.378[1686841379000000000,1686842249810810810] 1686936871.55s|------------------L0.378-------------------| "
- - "L0.379[1686842249810810811,1686843120621621620] 1686936871.55s |------------------L0.379------------------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0, all files 6mb "
- - "L0.?[1686841379000000000,1686843120621621620] 1686936871.55s|------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L0.378, L0.379"
- - " Creating 1 files"
- - "**** Simulation run 73, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686844331262770547]). 20 Input Files, 144mb total:"
- - "L0 "
- - "L0.91[1686843120621621621,1686843991432432430] 1686929421.02s 10mb|------------------L0.91-------------------| "
- - "L0.92[1686843991432432431,1686844862243243240] 1686929421.02s 10mb |------------------L0.92-------------------| "
- - "L0.105[1686843120621621621,1686843991432432430] 1686929712.33s 5mb|------------------L0.105------------------| "
- - "L0.106[1686843991432432431,1686844862243243240] 1686929712.33s 5mb |------------------L0.106------------------| "
- - "L0.119[1686843120621621621,1686843991432432430] 1686929965.33s 5mb|------------------L0.119------------------| "
- - "L0.120[1686843991432432431,1686844862243243240] 1686929965.33s 5mb |------------------L0.120------------------| "
- - "L0.135[1686843120621621621,1686843991432432430] 1686930563.07s 11mb|------------------L0.135------------------| "
- - "L0.136[1686843991432432431,1686844862243243240] 1686930563.07s 11mb |------------------L0.136------------------| "
- - "L0.149[1686843120621621621,1686843991432432430] 1686930780.95s 4mb|------------------L0.149------------------| "
- - "L0.150[1686843991432432431,1686844862243243240] 1686930780.95s 4mb |------------------L0.150------------------| "
- - "L0.163[1686843120621621621,1686843991432432430] 1686931336.08s 11mb|------------------L0.163------------------| "
- - "L0.164[1686843991432432431,1686844862243243240] 1686931336.08s 11mb |------------------L0.164------------------| "
- - "L0.177[1686843120621621621,1686843991432432430] 1686931600.58s 5mb|------------------L0.177------------------| "
- - "L0.178[1686843991432432431,1686844862243243240] 1686931600.58s 5mb |------------------L0.178------------------| "
- - "L0.191[1686843120621621621,1686843991432432430] 1686931893.7s 5mb|------------------L0.191------------------| "
- - "L0.192[1686843991432432431,1686844862243243240] 1686931893.7s 5mb |------------------L0.192------------------| "
- - "L0.208[1686843120621621621,1686843991432432430] 1686932458.05s 10mb|------------------L0.208------------------| "
- - "L0.209[1686843991432432431,1686844862243243240] 1686932458.05s 10mb |------------------L0.209------------------| "
- - "L0.218[1686843120621621621,1686843991432432430] 1686932677.39s 4mb|------------------L0.218------------------| "
- - "L0.219[1686843991432432431,1686844862243243240] 1686932677.39s 4mb |------------------L0.219------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 144mb total:"
- - "L0 "
- - "L0.?[1686843120621621621,1686844331262770547] 1686932677.39s 100mb|----------------------------L0.?----------------------------| "
- - "L0.?[1686844331262770548,1686844862243243240] 1686932677.39s 44mb |----------L0.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.91, L0.92, L0.105, L0.106, L0.119, L0.120, L0.135, L0.136, L0.149, L0.150, L0.163, L0.164, L0.177, L0.178, L0.191, L0.192, L0.208, L0.209, L0.218, L0.219"
- - " Creating 2 files"
- - "**** Simulation run 74, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686844334757800771]). 20 Input Files, 143mb total:"
- - "L0 "
- - "L0.233[1686843120621621621,1686843991432432430] 1686933271.57s 11mb|------------------L0.233------------------| "
- - "L0.234[1686843991432432431,1686844862243243240] 1686933271.57s 11mb |------------------L0.234------------------| "
- - "L0.247[1686843120621621621,1686843991432432430] 1686933528.17s 5mb|------------------L0.247------------------| "
- - "L0.248[1686843991432432431,1686844862243243240] 1686933528.17s 5mb |------------------L0.248------------------| "
- - "L0.261[1686843120621621621,1686843991432432430] 1686933830.06s 5mb|------------------L0.261------------------| "
- - "L0.262[1686843991432432431,1686844862243243240] 1686933830.06s 5mb |------------------L0.262------------------| "
- - "L0.277[1686843120621621621,1686843991432432430] 1686934254.96s 8mb|------------------L0.277------------------| "
- - "L0.278[1686843991432432431,1686844862243243240] 1686934254.96s 8mb |------------------L0.278------------------| "
- - "L0.290[1686843120621621621,1686843991432432430] 1686934759.75s 10mb|------------------L0.290------------------| "
- - "L0.291[1686843991432432431,1686844862243243240] 1686934759.75s 10mb |------------------L0.291------------------| "
- - "L0.305[1686843120621621621,1686843991432432430] 1686934966.48s 5mb|------------------L0.305------------------| "
- - "L0.306[1686843991432432431,1686844862243243240] 1686934966.48s 5mb |------------------L0.306------------------| "
- - "L0.319[1686843120621621621,1686843991432432430] 1686935151.54s 5mb|------------------L0.319------------------| "
- - "L0.320[1686843991432432431,1686844862243243240] 1686935151.54s 5mb |------------------L0.320------------------| "
- - "L0.336[1686843120621621621,1686843991432432430] 1686935546.05s 10mb|------------------L0.336------------------| "
- - "L0.337[1686843991432432431,1686844862243243240] 1686935546.05s 10mb |------------------L0.337------------------| "
- - "L0.350[1686843120621621621,1686843991432432430] 1686935742.51s 5mb|------------------L0.350------------------| "
- - "L0.351[1686843991432432431,1686844862243243240] 1686935742.51s 5mb |------------------L0.351------------------| "
- - "L0.364[1686843120621621621,1686843991432432430] 1686935947.46s 5mb|------------------L0.364------------------| "
- - "L0.365[1686843991432432431,1686844862243243240] 1686935947.46s 5mb |------------------L0.365------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 143mb total:"
- - "L0 "
- - "L0.?[1686843120621621621,1686844334757800771] 1686935947.46s 100mb|----------------------------L0.?----------------------------| "
- - "L0.?[1686844334757800772,1686844862243243240] 1686935947.46s 43mb |----------L0.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.233, L0.234, L0.247, L0.248, L0.261, L0.262, L0.277, L0.278, L0.290, L0.291, L0.305, L0.306, L0.319, L0.320, L0.336, L0.337, L0.350, L0.351, L0.364, L0.365"
- - " Creating 2 files"
- - "**** Simulation run 75, type=compact(ManySmallFiles). 2 Input Files, 6mb total:"
- - "L0, all files 3mb "
- - "L0.380[1686843120621621621,1686843991432432430] 1686936871.55s|------------------L0.380------------------| "
- - "L0.381[1686843991432432431,1686844862243243240] 1686936871.55s |------------------L0.381------------------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0, all files 6mb "
- - "L0.?[1686843120621621621,1686844862243243240] 1686936871.55s|------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L0.380, L0.381"
- - " Creating 1 files"
- - "**** Simulation run 76, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686846072884392167]). 20 Input Files, 144mb total:"
- - "L0 "
- - "L0.93[1686844862243243241,1686845733054054050] 1686929421.02s 10mb|------------------L0.93-------------------| "
- - "L0.94[1686845733054054051,1686846603864864860] 1686929421.02s 10mb |------------------L0.94-------------------| "
- - "L0.107[1686844862243243241,1686845733054054050] 1686929712.33s 5mb|------------------L0.107------------------| "
- - "L0.108[1686845733054054051,1686846603864864860] 1686929712.33s 5mb |------------------L0.108------------------| "
- - "L0.121[1686844862243243241,1686845733054054050] 1686929965.33s 5mb|------------------L0.121------------------| "
- - "L0.122[1686845733054054051,1686846603864864860] 1686929965.33s 5mb |------------------L0.122------------------| "
- - "L0.137[1686844862243243241,1686845733054054050] 1686930563.07s 11mb|------------------L0.137------------------| "
- - "L0.138[1686845733054054051,1686846603864864860] 1686930563.07s 11mb |------------------L0.138------------------| "
- - "L0.151[1686844862243243241,1686845733054054050] 1686930780.95s 4mb|------------------L0.151------------------| "
- - "L0.152[1686845733054054051,1686846603864864860] 1686930780.95s 4mb |------------------L0.152------------------| "
- - "L0.165[1686844862243243241,1686845733054054050] 1686931336.08s 11mb|------------------L0.165------------------| "
- - "L0.166[1686845733054054051,1686846603864864860] 1686931336.08s 11mb |------------------L0.166------------------| "
- - "L0.179[1686844862243243241,1686845733054054050] 1686931600.58s 5mb|------------------L0.179------------------| "
- - "L0.180[1686845733054054051,1686846603864864860] 1686931600.58s 5mb |------------------L0.180------------------| "
- - "L0.193[1686844862243243241,1686845733054054050] 1686931893.7s 5mb|------------------L0.193------------------| "
- - "L0.194[1686845733054054051,1686846603864864860] 1686931893.7s 5mb |------------------L0.194------------------| "
- - "L0.210[1686844862243243241,1686845733054054050] 1686932458.05s 10mb|------------------L0.210------------------| "
- - "L0.211[1686845733054054051,1686846603864864860] 1686932458.05s 10mb |------------------L0.211------------------| "
- - "L0.220[1686844862243243241,1686845733054054050] 1686932677.39s 4mb|------------------L0.220------------------| "
- - "L0.221[1686845733054054051,1686846603864864860] 1686932677.39s 4mb |------------------L0.221------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 144mb total:"
- - "L0 "
- - "L0.?[1686844862243243241,1686846072884392167] 1686932677.39s 100mb|----------------------------L0.?----------------------------| "
- - "L0.?[1686846072884392168,1686846603864864860] 1686932677.39s 44mb |----------L0.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.93, L0.94, L0.107, L0.108, L0.121, L0.122, L0.137, L0.138, L0.151, L0.152, L0.165, L0.166, L0.179, L0.180, L0.193, L0.194, L0.210, L0.211, L0.220, L0.221"
- - " Creating 2 files"
- - "**** Simulation run 77, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686846076379422391]). 20 Input Files, 143mb total:"
- - "L0 "
- - "L0.235[1686844862243243241,1686845733054054050] 1686933271.57s 11mb|------------------L0.235------------------| "
- - "L0.236[1686845733054054051,1686846603864864860] 1686933271.57s 11mb |------------------L0.236------------------| "
- - "L0.249[1686844862243243241,1686845733054054050] 1686933528.17s 5mb|------------------L0.249------------------| "
- - "L0.250[1686845733054054051,1686846603864864860] 1686933528.17s 5mb |------------------L0.250------------------| "
- - "L0.263[1686844862243243241,1686845733054054050] 1686933830.06s 5mb|------------------L0.263------------------| "
- - "L0.264[1686845733054054051,1686846603864864860] 1686933830.06s 5mb |------------------L0.264------------------| "
- - "L0.279[1686844862243243241,1686845733054054050] 1686934254.96s 8mb|------------------L0.279------------------| "
- - "L0.280[1686845733054054051,1686846603864864860] 1686934254.96s 8mb |------------------L0.280------------------| "
- - "L0.292[1686844862243243241,1686845733054054050] 1686934759.75s 10mb|------------------L0.292------------------| "
- - "L0.293[1686845733054054051,1686846603864864860] 1686934759.75s 10mb |------------------L0.293------------------| "
- - "L0.307[1686844862243243241,1686845733054054050] 1686934966.48s 5mb|------------------L0.307------------------| "
- - "L0.308[1686845733054054051,1686846603864864860] 1686934966.48s 5mb |------------------L0.308------------------| "
- - "L0.321[1686844862243243241,1686845733054054050] 1686935151.54s 5mb|------------------L0.321------------------| "
- - "L0.322[1686845733054054051,1686846603864864860] 1686935151.54s 5mb |------------------L0.322------------------| "
- - "L0.338[1686844862243243241,1686845733054054050] 1686935546.05s 10mb|------------------L0.338------------------| "
- - "L0.339[1686845733054054051,1686846603864864860] 1686935546.05s 10mb |------------------L0.339------------------| "
- - "L0.352[1686844862243243241,1686845733054054050] 1686935742.51s 5mb|------------------L0.352------------------| "
- - "L0.353[1686845733054054051,1686846603864864860] 1686935742.51s 5mb |------------------L0.353------------------| "
- - "L0.366[1686844862243243241,1686845733054054050] 1686935947.46s 5mb|------------------L0.366------------------| "
- - "L0.367[1686845733054054051,1686846603864864860] 1686935947.46s 5mb |------------------L0.367------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 143mb total:"
- - "L0 "
- - "L0.?[1686844862243243241,1686846076379422391] 1686935947.46s 100mb|----------------------------L0.?----------------------------| "
- - "L0.?[1686846076379422392,1686846603864864860] 1686935947.46s 43mb |----------L0.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.235, L0.236, L0.249, L0.250, L0.263, L0.264, L0.279, L0.280, L0.292, L0.293, L0.307, L0.308, L0.321, L0.322, L0.338, L0.339, L0.352, L0.353, L0.366, L0.367"
- - " Creating 2 files"
- - "**** Simulation run 78, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686847814506013787]). 20 Input Files, 144mb total:"
- - "L0 "
- - "L0.95[1686846603864864861,1686847474675675670] 1686929421.02s 10mb|------------------L0.95-------------------| "
- - "L0.96[1686847474675675671,1686848345486486480] 1686929421.02s 10mb |------------------L0.96-------------------| "
- - "L0.109[1686846603864864861,1686847474675675670] 1686929712.33s 5mb|------------------L0.109------------------| "
- - "L0.110[1686847474675675671,1686848345486486480] 1686929712.33s 5mb |------------------L0.110------------------| "
- - "L0.123[1686846603864864861,1686847474675675670] 1686929965.33s 5mb|------------------L0.123------------------| "
- - "L0.124[1686847474675675671,1686848345486486480] 1686929965.33s 5mb |------------------L0.124------------------| "
- - "L0.139[1686846603864864861,1686847474675675670] 1686930563.07s 11mb|------------------L0.139------------------| "
- - "L0.140[1686847474675675671,1686848345486486480] 1686930563.07s 11mb |------------------L0.140------------------| "
- - "L0.153[1686846603864864861,1686847474675675670] 1686930780.95s 4mb|------------------L0.153------------------| "
- - "L0.154[1686847474675675671,1686848345486486480] 1686930780.95s 4mb |------------------L0.154------------------| "
- - "L0.167[1686846603864864861,1686847474675675670] 1686931336.08s 11mb|------------------L0.167------------------| "
- - "L0.168[1686847474675675671,1686848345486486480] 1686931336.08s 11mb |------------------L0.168------------------| "
- - "L0.181[1686846603864864861,1686847474675675670] 1686931600.58s 5mb|------------------L0.181------------------| "
- - "L0.182[1686847474675675671,1686848345486486480] 1686931600.58s 5mb |------------------L0.182------------------| "
- - "L0.195[1686846603864864861,1686847474675675670] 1686931893.7s 5mb|------------------L0.195------------------| "
- - "L0.196[1686847474675675671,1686848345486486480] 1686931893.7s 5mb |------------------L0.196------------------| "
- - "L0.212[1686846603864864861,1686847474675675670] 1686932458.05s 10mb|------------------L0.212------------------| "
- - "L0.213[1686847474675675671,1686848345486486480] 1686932458.05s 10mb |------------------L0.213------------------| "
- - "L0.222[1686846603864864861,1686847474675675670] 1686932677.39s 4mb|------------------L0.222------------------| "
- - "L0.223[1686847474675675671,1686848345486486480] 1686932677.39s 4mb |------------------L0.223------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 144mb total:"
- - "L0 "
- - "L0.?[1686846603864864861,1686847814506013787] 1686932677.39s 100mb|----------------------------L0.?----------------------------| "
- - "L0.?[1686847814506013788,1686848345486486480] 1686932677.39s 44mb |----------L0.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.95, L0.96, L0.109, L0.110, L0.123, L0.124, L0.139, L0.140, L0.153, L0.154, L0.167, L0.168, L0.181, L0.182, L0.195, L0.196, L0.212, L0.213, L0.222, L0.223"
- - " Creating 2 files"
- - "**** Simulation run 79, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686847818001044011]). 20 Input Files, 143mb total:"
- - "L0 "
- - "L0.237[1686846603864864861,1686847474675675670] 1686933271.57s 11mb|------------------L0.237------------------| "
- - "L0.238[1686847474675675671,1686848345486486480] 1686933271.57s 11mb |------------------L0.238------------------| "
- - "L0.251[1686846603864864861,1686847474675675670] 1686933528.17s 5mb|------------------L0.251------------------| "
- - "L0.252[1686847474675675671,1686848345486486480] 1686933528.17s 5mb |------------------L0.252------------------| "
- - "L0.265[1686846603864864861,1686847474675675670] 1686933830.06s 5mb|------------------L0.265------------------| "
- - "L0.266[1686847474675675671,1686848345486486480] 1686933830.06s 5mb |------------------L0.266------------------| "
- - "L0.281[1686846603864864861,1686847474675675670] 1686934254.96s 8mb|------------------L0.281------------------| "
- - "L0.282[1686847474675675671,1686848345486486480] 1686934254.96s 8mb |------------------L0.282------------------| "
- - "L0.294[1686846603864864861,1686847474675675670] 1686934759.75s 10mb|------------------L0.294------------------| "
- - "L0.295[1686847474675675671,1686848345486486480] 1686934759.75s 10mb |------------------L0.295------------------| "
- - "L0.309[1686846603864864861,1686847474675675670] 1686934966.48s 5mb|------------------L0.309------------------| "
- - "L0.310[1686847474675675671,1686848345486486480] 1686934966.48s 5mb |------------------L0.310------------------| "
- - "L0.323[1686846603864864861,1686847474675675670] 1686935151.54s 5mb|------------------L0.323------------------| "
- - "L0.324[1686847474675675671,1686848345486486480] 1686935151.54s 5mb |------------------L0.324------------------| "
- - "L0.340[1686846603864864861,1686847474675675670] 1686935546.05s 10mb|------------------L0.340------------------| "
- - "L0.341[1686847474675675671,1686848345486486480] 1686935546.05s 10mb |------------------L0.341------------------| "
- - "L0.354[1686846603864864861,1686847474675675670] 1686935742.51s 5mb|------------------L0.354------------------| "
- - "L0.355[1686847474675675671,1686848345486486480] 1686935742.51s 5mb |------------------L0.355------------------| "
- - "L0.368[1686846603864864861,1686847474675675670] 1686935947.46s 5mb|------------------L0.368------------------| "
- - "L0.369[1686847474675675671,1686848345486486480] 1686935947.46s 5mb |------------------L0.369------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 143mb total:"
- - "L0 "
- - "L0.?[1686846603864864861,1686847818001044011] 1686935947.46s 100mb|----------------------------L0.?----------------------------| "
- - "L0.?[1686847818001044012,1686848345486486480] 1686935947.46s 43mb |----------L0.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.237, L0.238, L0.251, L0.252, L0.265, L0.266, L0.281, L0.282, L0.294, L0.295, L0.309, L0.310, L0.323, L0.324, L0.340, L0.341, L0.354, L0.355, L0.368, L0.369"
- - " Creating 2 files"
- - "**** Simulation run 80, type=compact(ManySmallFiles). 2 Input Files, 6mb total:"
- - "L0, all files 3mb "
- - "L0.384[1686846603864864861,1686847474675675670] 1686936871.55s|------------------L0.384------------------| "
- - "L0.385[1686847474675675671,1686848345486486480] 1686936871.55s |------------------L0.385------------------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0, all files 6mb "
- - "L0.?[1686846603864864861,1686848345486486480] 1686936871.55s|------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L0.384, L0.385"
- - " Creating 1 files"
- - "**** Simulation run 81, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686849600239388909]). 20 Input Files, 139mb total:"
- - "L0 "
- - "L0.97[1686848345486486481,1686849216297297290] 1686929421.02s 10mb|------------------L0.97-------------------| "
- - "L0.98[1686849216297297291,1686850087108108100] 1686929421.02s 10mb |------------------L0.98-------------------| "
- - "L0.111[1686848345486486481,1686849216297297290] 1686929712.33s 5mb|------------------L0.111------------------| "
- - "L0.112[1686849216297297291,1686850087108108100] 1686929712.33s 5mb |------------------L0.112------------------| "
- - "L0.125[1686848345486486481,1686849216297297290] 1686929965.33s 5mb|------------------L0.125------------------| "
- - "L0.126[1686849216297297291,1686850087108108100] 1686929965.33s 5mb |------------------L0.126------------------| "
- - "L0.141[1686848345486486481,1686849216297297290] 1686930563.07s 11mb|------------------L0.141------------------| "
- - "L0.142[1686849216297297291,1686850087108108100] 1686930563.07s 11mb |------------------L0.142------------------| "
- - "L0.155[1686848345486486481,1686849216297297290] 1686930780.95s 4mb|------------------L0.155------------------| "
- - "L0.156[1686849216297297291,1686850087108108100] 1686930780.95s 4mb |------------------L0.156------------------| "
- - "L0.169[1686848345486486481,1686849216297297290] 1686931336.08s 11mb|------------------L0.169------------------| "
- - "L0.170[1686849216297297291,1686850087108108100] 1686931336.08s 11mb |------------------L0.170------------------| "
- - "L0.183[1686848345486486481,1686849216297297290] 1686931600.58s 5mb|------------------L0.183------------------| "
- - "L0.184[1686849216297297291,1686850087108108100] 1686931600.58s 5mb |------------------L0.184------------------| "
- - "L0.197[1686848345486486481,1686849216297297290] 1686931893.7s 5mb|------------------L0.197------------------| "
- - "L0.198[1686849216297297291,1686850087108108100] 1686931893.7s 5mb |------------------L0.198------------------| "
- - "L0.214[1686848345486486481,1686849216297297290] 1686932458.05s 10mb|------------------L0.214------------------| "
- - "L0.215[1686849216297297291,1686849719000000000] 1686932458.05s 6mb |--------L0.215---------| "
- - "L0.419[1686849779000000000,1686850087108108100] 1686932458.05s 4mb |---L0.419----| "
- - "L0.224[1686848345486486481,1686849216297297290] 1686932677.39s 4mb|------------------L0.224------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 139mb total:"
- - "L0 "
- - "L0.?[1686848345486486481,1686849600239388909] 1686932677.39s 100mb|-----------------------------L0.?-----------------------------| "
- - "L0.?[1686849600239388910,1686850087108108100] 1686932677.39s 39mb |---------L0.?----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.97, L0.98, L0.111, L0.112, L0.125, L0.126, L0.141, L0.142, L0.155, L0.156, L0.169, L0.170, L0.183, L0.184, L0.197, L0.198, L0.214, L0.215, L0.224, L0.419"
- - " Creating 2 files"
- - "**** Simulation run 82, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686849568759166090]). 20 Input Files, 142mb total:"
- - "L0 "
- - "L0.225[1686849216297297291,1686850087108108100] 1686932677.39s 4mb |------------------L0.225------------------| "
- - "L0.239[1686848345486486481,1686849216297297290] 1686933271.57s 11mb|------------------L0.239------------------| "
- - "L0.240[1686849216297297291,1686850087108108100] 1686933271.57s 11mb |------------------L0.240------------------| "
- - "L0.253[1686848345486486481,1686849216297297290] 1686933528.17s 5mb|------------------L0.253------------------| "
- - "L0.254[1686849216297297291,1686850087108108100] 1686933528.17s 5mb |------------------L0.254------------------| "
- - "L0.267[1686848345486486481,1686849216297297290] 1686933830.06s 5mb|------------------L0.267------------------| "
- - "L0.268[1686849216297297291,1686850087108108100] 1686933830.06s 5mb |------------------L0.268------------------| "
- - "L0.283[1686848345486486481,1686849216297297290] 1686934254.96s 8mb|------------------L0.283------------------| "
- - "L0.284[1686849216297297291,1686850087108108100] 1686934254.96s 8mb |------------------L0.284------------------| "
- - "L0.296[1686848345486486481,1686849216297297290] 1686934759.75s 10mb|------------------L0.296------------------| "
- - "L0.297[1686849216297297291,1686850087108108100] 1686934759.75s 10mb |------------------L0.297------------------| "
- - "L0.311[1686848345486486481,1686849216297297290] 1686934966.48s 5mb|------------------L0.311------------------| "
- - "L0.312[1686849216297297291,1686850087108108100] 1686934966.48s 5mb |------------------L0.312------------------| "
- - "L0.325[1686848345486486481,1686849216297297290] 1686935151.54s 5mb|------------------L0.325------------------| "
- - "L0.326[1686849216297297291,1686850087108108100] 1686935151.54s 5mb |------------------L0.326------------------| "
- - "L0.342[1686848345486486481,1686849216297297290] 1686935546.05s 10mb|------------------L0.342------------------| "
- - "L0.343[1686849216297297291,1686850087108108100] 1686935546.05s 10mb |------------------L0.343------------------| "
- - "L0.356[1686848345486486481,1686849216297297290] 1686935742.51s 5mb|------------------L0.356------------------| "
- - "L0.357[1686849216297297291,1686850087108108100] 1686935742.51s 5mb |------------------L0.357------------------| "
- - "L0.370[1686848345486486481,1686849216297297290] 1686935947.46s 5mb|------------------L0.370------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 142mb total:"
- - "L0 "
- - "L0.?[1686848345486486481,1686849568759166090] 1686935947.46s 100mb|----------------------------L0.?-----------------------------| "
- - "L0.?[1686849568759166091,1686850087108108100] 1686935947.46s 42mb |----------L0.?----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.225, L0.239, L0.240, L0.253, L0.254, L0.267, L0.268, L0.283, L0.284, L0.296, L0.297, L0.311, L0.312, L0.325, L0.326, L0.342, L0.343, L0.356, L0.357, L0.370"
- - " Creating 2 files"
- - "**** Simulation run 83, type=compact(ManySmallFiles). 3 Input Files, 11mb total:"
- - "L0 "
- - "L0.371[1686849216297297291,1686850087108108100] 1686935947.46s 5mb |------------------L0.371------------------| "
- - "L0.386[1686848345486486481,1686849216297297290] 1686936871.55s 3mb|------------------L0.386------------------| "
- - "L0.387[1686849216297297291,1686850087108108100] 1686936871.55s 3mb |------------------L0.387------------------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 11mb total:"
- - "L0, all files 11mb "
- - "L0.?[1686848345486486481,1686850087108108100] 1686936871.55s|------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L0.371, L0.386, L0.387"
- - " Creating 1 files"
- - "**** Simulation run 84, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686851297766913590]). 20 Input Files, 144mb total:"
- - "L0 "
- - "L0.99[1686850087108108101,1686850957918918910] 1686929421.02s 10mb|------------------L0.99-------------------| "
- - "L0.100[1686850957918918911,1686851828729729720] 1686929421.02s 10mb |------------------L0.100------------------| "
- - "L0.113[1686850087108108101,1686850957918918910] 1686929712.33s 5mb|------------------L0.113------------------| "
- - "L0.114[1686850957918918911,1686851828729729720] 1686929712.33s 5mb |------------------L0.114------------------| "
- - "L0.127[1686850087108108101,1686850957918918910] 1686929965.33s 5mb|------------------L0.127------------------| "
- - "L0.128[1686850957918918911,1686851828729729720] 1686929965.33s 5mb |------------------L0.128------------------| "
- - "L0.143[1686850087108108101,1686850957918918910] 1686930563.07s 11mb|------------------L0.143------------------| "
- - "L0.144[1686850957918918911,1686851828729729720] 1686930563.07s 11mb |------------------L0.144------------------| "
- - "L0.157[1686850087108108101,1686850957918918910] 1686930780.95s 4mb|------------------L0.157------------------| "
- - "L0.158[1686850957918918911,1686851828729729720] 1686930780.95s 4mb |------------------L0.158------------------| "
- - "L0.171[1686850087108108101,1686850957918918910] 1686931336.08s 11mb|------------------L0.171------------------| "
- - "L0.172[1686850957918918911,1686851828729729720] 1686931336.08s 11mb |------------------L0.172------------------| "
- - "L0.185[1686850087108108101,1686850957918918910] 1686931600.58s 5mb|------------------L0.185------------------| "
- - "L0.186[1686850957918918911,1686851828729729720] 1686931600.58s 5mb |------------------L0.186------------------| "
- - "L0.199[1686850087108108101,1686850957918918910] 1686931893.7s 5mb|------------------L0.199------------------| "
- - "L0.200[1686850957918918911,1686851828729729720] 1686931893.7s 5mb |------------------L0.200------------------| "
- - "L0.420[1686850087108108101,1686850957918918910] 1686932458.05s 10mb|------------------L0.420------------------| "
- - "L0.421[1686850957918918911,1686851828729729720] 1686932458.05s 10mb |------------------L0.421------------------| "
- - "L0.226[1686850087108108101,1686850957918918910] 1686932677.39s 4mb|------------------L0.226------------------| "
- - "L0.227[1686850957918918911,1686851828729729720] 1686932677.39s 4mb |------------------L0.227------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 144mb total:"
- - "L0 "
- - "L0.?[1686850087108108101,1686851297766913590] 1686932677.39s 100mb|----------------------------L0.?----------------------------| "
- - "L0.?[1686851297766913591,1686851828729729720] 1686932677.39s 44mb |----------L0.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.99, L0.100, L0.113, L0.114, L0.127, L0.128, L0.143, L0.144, L0.157, L0.158, L0.171, L0.172, L0.185, L0.186, L0.199, L0.200, L0.226, L0.227, L0.420, L0.421"
- - " Creating 2 files"
- - "**** Simulation run 85, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686851301244287251]). 20 Input Files, 143mb total:"
- - "L0 "
- - "L0.241[1686850087108108101,1686850957918918910] 1686933271.57s 11mb|------------------L0.241------------------| "
- - "L0.242[1686850957918918911,1686851828729729720] 1686933271.57s 11mb |------------------L0.242------------------| "
- - "L0.255[1686850087108108101,1686850957918918910] 1686933528.17s 5mb|------------------L0.255------------------| "
- - "L0.256[1686850957918918911,1686851828729729720] 1686933528.17s 5mb |------------------L0.256------------------| "
- - "L0.269[1686850087108108101,1686850957918918910] 1686933830.06s 5mb|------------------L0.269------------------| "
- - "L0.270[1686850957918918911,1686851828729729720] 1686933830.06s 5mb |------------------L0.270------------------| "
- - "L0.285[1686850087108108101,1686850957918918910] 1686934254.96s 8mb|------------------L0.285------------------| "
- - "L0.286[1686850957918918911,1686851828729729720] 1686934254.96s 8mb |------------------L0.286------------------| "
- - "L0.298[1686850087108108101,1686850957918918910] 1686934759.75s 10mb|------------------L0.298------------------| "
- - "L0.299[1686850957918918911,1686851828729729720] 1686934759.75s 10mb |------------------L0.299------------------| "
- - "L0.313[1686850087108108101,1686850957918918910] 1686934966.48s 5mb|------------------L0.313------------------| "
- - "L0.314[1686850957918918911,1686851828729729720] 1686934966.48s 5mb |------------------L0.314------------------| "
- - "L0.327[1686850087108108101,1686850957918918910] 1686935151.54s 5mb|------------------L0.327------------------| "
- - "L0.328[1686850957918918911,1686851828729729720] 1686935151.54s 5mb |------------------L0.328------------------| "
- - "L0.344[1686850087108108101,1686850957918918910] 1686935546.05s 10mb|------------------L0.344------------------| "
- - "L0.345[1686850957918918911,1686851828729729720] 1686935546.05s 10mb |------------------L0.345------------------| "
- - "L0.358[1686850087108108101,1686850957918918910] 1686935742.51s 5mb|------------------L0.358------------------| "
- - "L0.359[1686850957918918911,1686851828729729720] 1686935742.51s 5mb |------------------L0.359------------------| "
- - "L0.372[1686850087108108101,1686850957918918910] 1686935947.46s 5mb|------------------L0.372------------------| "
- - "L0.373[1686850957918918911,1686851828729729720] 1686935947.46s 5mb |------------------L0.373------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 143mb total:"
- - "L0 "
- - "L0.?[1686850087108108101,1686851301244287251] 1686935947.46s 100mb|----------------------------L0.?----------------------------| "
- - "L0.?[1686851301244287252,1686851828729729720] 1686935947.46s 43mb |----------L0.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.241, L0.242, L0.255, L0.256, L0.269, L0.270, L0.285, L0.286, L0.298, L0.299, L0.313, L0.314, L0.327, L0.328, L0.344, L0.345, L0.358, L0.359, L0.372, L0.373"
- - " Creating 2 files"
- - "**** Simulation run 86, type=compact(ManySmallFiles). 2 Input Files, 6mb total:"
- - "L0, all files 3mb "
- - "L0.382[1686844862243243241,1686845733054054050] 1686936871.55s|------------------L0.382------------------| "
- - "L0.383[1686845733054054051,1686846603864864860] 1686936871.55s |------------------L0.383------------------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0, all files 6mb "
- - "L0.?[1686844862243243241,1686846603864864860] 1686936871.55s|------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L0.382, L0.383"
- - " Creating 1 files"
- - "**** Simulation run 87, type=compact(ManySmallFiles). 2 Input Files, 6mb total:"
- - "L0, all files 3mb "
- - "L0.388[1686850087108108101,1686850957918918910] 1686936871.55s|------------------L0.388------------------| "
- - "L0.389[1686850957918918911,1686851828729729720] 1686936871.55s |------------------L0.389------------------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0, all files 6mb "
- - "L0.?[1686850087108108101,1686851828729729720] 1686936871.55s|------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L0.388, L0.389"
- - " Creating 1 files"
- - "**** Simulation run 88, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686853222027027016]). 20 Input Files, 102mb total:"
- - "L0 "
- - "L0.101[1686851828729729721,1686852699540540530] 1686929421.02s 10mb|------------------L0.101------------------| "
- - "L0.102[1686852699540540531,1686853319000000000] 1686929421.02s 7mb |------------L0.102------------| "
- - "L0.563[1686853379000000000,1686853570351351340] 1686929421.02s 1mb |L0.563-| "
- - "L0.115[1686851828729729721,1686852699540540530] 1686929712.33s 5mb|------------------L0.115------------------| "
- - "L0.116[1686852699540540531,1686852839000000000] 1686929712.33s 898kb |L0.116| "
- - "L0.451[1686852899000000000,1686853570351351340] 1686929712.33s 8mb |-------------L0.451-------------| "
- - "L0.129[1686851828729729721,1686852699540540530] 1686929965.33s 5mb|------------------L0.129------------------| "
- - "L0.130[1686852699540540531,1686853570351351340] 1686929965.33s 5mb |------------------L0.130------------------| "
- - "L0.145[1686851828729729721,1686852699540540530] 1686930563.07s 11mb|------------------L0.145------------------| "
- - "L0.146[1686852699540540531,1686853019000000000] 1686930563.07s 4mb |----L0.146----| "
- - "L0.521[1686853079000000000,1686853570351351340] 1686930563.07s 3mb |--------L0.521---------| "
- - "L0.159[1686851828729729721,1686852699540540530] 1686930780.95s 4mb|------------------L0.159------------------| "
- - "L0.160[1686852699540540531,1686853379000000000] 1686930780.95s 3mb |-------------L0.160--------------| "
- - "L0.593[1686853439000000000,1686853570351351340] 1686930780.95s 1mb |L0.593|"
- - "L0.173[1686851828729729721,1686852699540540530] 1686931336.08s 11mb|------------------L0.173------------------| "
- - "L0.174[1686852699540540531,1686853079000000000] 1686931336.08s 5mb |-----L0.174------| "
- - "L0.549[1686853139000000000,1686853570351351340] 1686931336.08s 3mb |-------L0.549-------| "
- - "L0.187[1686851828729729721,1686852699540540530] 1686931600.58s 5mb|------------------L0.187------------------| "
- - "L0.188[1686852699540540531,1686852899000000000] 1686931600.58s 1mb |-L0.188-| "
- - "L0.479[1686852959000000000,1686853570351351340] 1686931600.58s 7mb |-----------L0.479------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 102mb total:"
- - "L0 "
- - "L0.?[1686851828729729721,1686853222027027016] 1686931600.58s 82mb|--------------------------------L0.?---------------------------------| "
- - "L0.?[1686853222027027017,1686853570351351340] 1686931600.58s 20mb |-----L0.?------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.101, L0.102, L0.115, L0.116, L0.129, L0.130, L0.145, L0.146, L0.159, L0.160, L0.173, L0.174, L0.187, L0.188, L0.451, L0.479, L0.521, L0.549, L0.563, L0.593"
- - " Creating 2 files"
- - "**** Simulation run 89, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686853236700974398]). 20 Input Files, 124mb total:"
- - "L0 "
- - "L0.201[1686851828729729721,1686852699540540530] 1686931893.7s 5mb|------------------L0.201------------------| "
- - "L0.202[1686852699540540531,1686853570351351340] 1686931893.7s 5mb |------------------L0.202------------------| "
- - "L0.422[1686851828729729721,1686852699540540530] 1686932458.05s 10mb|------------------L0.422------------------| "
- - "L0.423[1686852699540540531,1686853570351351340] 1686932458.05s 10mb |------------------L0.423------------------| "
- - "L0.228[1686851828729729721,1686852699540540530] 1686932677.39s 4mb|------------------L0.228------------------| "
- - "L0.229[1686852699540540531,1686853570351351340] 1686932677.39s 4mb |------------------L0.229------------------| "
- - "L0.243[1686851828729729721,1686852699540540530] 1686933271.57s 11mb|------------------L0.243------------------| "
- - "L0.244[1686852699540540531,1686853019000000000] 1686933271.57s 4mb |----L0.244----| "
- - "L0.535[1686853079000000000,1686853570351351340] 1686933271.57s 3mb |--------L0.535---------| "
- - "L0.257[1686851828729729721,1686852699540540530] 1686933528.17s 5mb|------------------L0.257------------------| "
- - "L0.258[1686852699540540531,1686852959000000000] 1686933528.17s 2mb |--L0.258---| "
- - "L0.507[1686853019000000000,1686853570351351340] 1686933528.17s 6mb |----------L0.507----------| "
- - "L0.271[1686851828729729721,1686852699540540530] 1686933830.06s 5mb|------------------L0.271------------------| "
- - "L0.272[1686852699540540531,1686853570351351340] 1686933830.06s 5mb |------------------L0.272------------------| "
- - "L0.287[1686851828729729721,1686852119000000000] 1686934254.96s 3mb|---L0.287----| "
- - "L0.438[1686852179000000000,1686852699540540530] 1686934254.96s 5mb |---------L0.438---------| "
- - "L0.439[1686852699540540531,1686853570351351340] 1686934254.96s 8mb |------------------L0.439------------------| "
- - "L0.300[1686851828729729721,1686852699540540530] 1686934759.75s 10mb|------------------L0.300------------------| "
- - "L0.301[1686852699540540531,1686853570351351340] 1686934759.75s 10mb |------------------L0.301------------------| "
- - "L0.315[1686851828729729721,1686852699540540530] 1686934966.48s 5mb|------------------L0.315------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 124mb total:"
- - "L0 "
- - "L0.?[1686851828729729721,1686853236700974398] 1686934966.48s 100mb|---------------------------------L0.?---------------------------------| "
- - "L0.?[1686853236700974399,1686853570351351340] 1686934966.48s 24mb |-----L0.?------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.201, L0.202, L0.228, L0.229, L0.243, L0.244, L0.257, L0.258, L0.271, L0.272, L0.287, L0.300, L0.301, L0.315, L0.422, L0.423, L0.438, L0.439, L0.507, L0.535"
- - " Creating 2 files"
- - "**** Simulation run 90, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686853222027027016]). 14 Input Files, 68mb total:"
- - "L0 "
- - "L0.316[1686852699540540531,1686852899000000000] 1686934966.48s 1mb |-L0.316-| "
- - "L0.493[1686852959000000000,1686853570351351340] 1686934966.48s 7mb |-----------L0.493------------| "
- - "L0.329[1686851828729729721,1686852699540540530] 1686935151.54s 5mb|------------------L0.329------------------| "
- - "L0.330[1686852699540540531,1686853570351351340] 1686935151.54s 5mb |------------------L0.330------------------| "
- - "L0.346[1686851828729729721,1686852699540540530] 1686935546.05s 10mb|------------------L0.346------------------| "
- - "L0.347[1686852699540540531,1686853319000000000] 1686935546.05s 7mb |------------L0.347------------| "
- - "L0.578[1686853379000000000,1686853570351351340] 1686935546.05s 1mb |L0.578-| "
- - "L0.360[1686851828729729721,1686852699540540530] 1686935742.51s 5mb|------------------L0.360------------------| "
- - "L0.361[1686852699540540531,1686852839000000000] 1686935742.51s 899kb |L0.361| "
- - "L0.465[1686852899000000000,1686853570351351340] 1686935742.51s 8mb |-------------L0.465-------------| "
- - "L0.374[1686851828729729721,1686852699540540530] 1686935947.46s 5mb|------------------L0.374------------------| "
- - "L0.375[1686852699540540531,1686853570351351340] 1686935947.46s 5mb |------------------L0.375------------------| "
- - "L0.390[1686851828729729721,1686852699540540530] 1686936871.55s 3mb|------------------L0.390------------------| "
- - "L0.391[1686852699540540531,1686853570351351340] 1686936871.55s 3mb |------------------L0.391------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 68mb total:"
- - "L0 "
- - "L0.?[1686851828729729721,1686853222027027016] 1686936871.55s 55mb|--------------------------------L0.?---------------------------------| "
- - "L0.?[1686853222027027017,1686853570351351340] 1686936871.55s 14mb |-----L0.?------| "
- - "Committing partition 1:"
- - " Soft Deleting 14 files: L0.316, L0.329, L0.330, L0.346, L0.347, L0.360, L0.361, L0.374, L0.375, L0.390, L0.391, L0.465, L0.493, L0.578"
- - " Creating 2 files"
- - "**** Simulation run 91, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686854830189955965]). 20 Input Files, 138mb total:"
- - "L0 "
- - "L0.564[1686853570351351341,1686854441162162150] 1686929421.02s 6mb|------------------L0.564------------------| "
- - "L0.565[1686854441162162151,1686855311972972960] 1686929421.02s 6mb |------------------L0.565------------------| "
- - "L0.452[1686853570351351341,1686854441162162150] 1686929712.33s 10mb|------------------L0.452------------------| "
- - "L0.453[1686854441162162151,1686855311972972960] 1686929712.33s 10mb |------------------L0.453------------------| "
- - "L0.131[1686853570351351341,1686854441162162150] 1686929965.33s 5mb|------------------L0.131------------------| "
- - "L0.132[1686854441162162151,1686855059000000000] 1686929965.33s 4mb |-----------L0.132------------| "
- - "L0.661[1686855119000000000,1686855311972972960] 1686929965.33s 1mb |L0.661-| "
- - "L0.522[1686853570351351341,1686854441162162150] 1686930563.07s 6mb|------------------L0.522------------------| "
- - "L0.523[1686854441162162151,1686855311972972960] 1686930563.07s 6mb |------------------L0.523------------------| "
- - "L0.594[1686853570351351341,1686854441162162150] 1686930780.95s 9mb|------------------L0.594------------------| "
- - "L0.595[1686854441162162151,1686855311972972960] 1686930780.95s 9mb |------------------L0.595------------------| "
- - "L0.550[1686853570351351341,1686854441162162150] 1686931336.08s 6mb|------------------L0.550------------------| "
- - "L0.551[1686854441162162151,1686855311972972960] 1686931336.08s 6mb |------------------L0.551------------------| "
- - "L0.480[1686853570351351341,1686854441162162150] 1686931600.58s 10mb|------------------L0.480------------------| "
- - "L0.481[1686854441162162151,1686855311972972960] 1686931600.58s 10mb |------------------L0.481------------------| "
- - "L0.203[1686853570351351341,1686854441162162150] 1686931893.7s 5mb|------------------L0.203------------------| "
- - "L0.204[1686854441162162151,1686855311972972960] 1686931893.7s 5mb |------------------L0.204------------------| "
- - "L0.424[1686853570351351341,1686854441162162150] 1686932458.05s 10mb|------------------L0.424------------------| "
- - "L0.425[1686854441162162151,1686855311972972960] 1686932458.05s 10mb |------------------L0.425------------------| "
- - "L0.230[1686853570351351341,1686854219000000000] 1686932677.39s 3mb|------------L0.230-------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 138mb total:"
- - "L0 "
- - "L0.?[1686853570351351341,1686854830189955965] 1686932677.39s 100mb|-----------------------------L0.?------------------------------| "
- - "L0.?[1686854830189955966,1686855311972972960] 1686932677.39s 38mb |---------L0.?---------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.131, L0.132, L0.203, L0.204, L0.230, L0.424, L0.425, L0.452, L0.453, L0.480, L0.481, L0.522, L0.523, L0.550, L0.551, L0.564, L0.565, L0.594, L0.595, L0.661"
- - " Creating 2 files"
- - "**** Simulation run 92, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686854963648648636]). 20 Input Files, 118mb total:"
- - "L0 "
- - "L0.623[1686854279000000000,1686854441162162150] 1686932677.39s 1006kb |L0.623| "
- - "L0.624[1686854441162162151,1686855311972972960] 1686932677.39s 5mb |------------------L0.624------------------| "
- - "L0.536[1686853570351351341,1686854441162162150] 1686933271.57s 6mb|------------------L0.536------------------| "
- - "L0.537[1686854441162162151,1686855311972972960] 1686933271.57s 6mb |------------------L0.537------------------| "
- - "L0.508[1686853570351351341,1686854441162162150] 1686933528.17s 10mb|------------------L0.508------------------| "
- - "L0.509[1686854441162162151,1686855311972972960] 1686933528.17s 10mb |------------------L0.509------------------| "
- - "L0.273[1686853570351351341,1686854441162162150] 1686933830.06s 5mb|------------------L0.273------------------| "
- - "L0.274[1686854441162162151,1686855119000000000] 1686933830.06s 4mb |-------------L0.274--------------| "
- - "L0.678[1686855179000000000,1686855311972972960] 1686933830.06s 920kb |L0.678|"
- - "L0.440[1686853570351351341,1686854441162162150] 1686934254.96s 8mb|------------------L0.440------------------| "
- - "L0.441[1686854441162162151,1686855311972972960] 1686934254.96s 8mb |------------------L0.441------------------| "
- - "L0.302[1686853570351351341,1686853679000000000] 1686934759.75s 1mb|L0.302| "
- - "L0.608[1686853739000000000,1686854441162162150] 1686934759.75s 4mb |--------------L0.608--------------| "
- - "L0.609[1686854441162162151,1686855311972972960] 1686934759.75s 5mb |------------------L0.609------------------| "
- - "L0.494[1686853570351351341,1686854441162162150] 1686934966.48s 10mb|------------------L0.494------------------| "
- - "L0.495[1686854441162162151,1686855311972972960] 1686934966.48s 10mb |------------------L0.495------------------| "
- - "L0.331[1686853570351351341,1686854441162162150] 1686935151.54s 5mb|------------------L0.331------------------| "
- - "L0.332[1686854441162162151,1686855311972972960] 1686935151.54s 5mb |------------------L0.332------------------| "
- - "L0.579[1686853570351351341,1686854441162162150] 1686935546.05s 6mb|------------------L0.579------------------| "
- - "L0.580[1686854441162162151,1686855311972972960] 1686935546.05s 6mb |------------------L0.580------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 118mb total:"
- - "L0 "
- - "L0.?[1686853570351351341,1686854963648648636] 1686935546.05s 94mb|--------------------------------L0.?---------------------------------| "
- - "L0.?[1686854963648648637,1686855311972972960] 1686935546.05s 24mb |-----L0.?------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.273, L0.274, L0.302, L0.331, L0.332, L0.440, L0.441, L0.494, L0.495, L0.508, L0.509, L0.536, L0.537, L0.579, L0.580, L0.608, L0.609, L0.623, L0.624, L0.678"
- - " Creating 2 files"
- - "**** Simulation run 93, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686854963648648636]). 7 Input Files, 37mb total:"
- - "L0 "
- - "L0.466[1686853570351351341,1686854441162162150] 1686935742.51s 10mb|------------------L0.466------------------| "
- - "L0.467[1686854441162162151,1686855311972972960] 1686935742.51s 10mb |------------------L0.467------------------| "
- - "L0.376[1686853570351351341,1686854441162162150] 1686935947.46s 5mb|------------------L0.376------------------| "
- - "L0.377[1686854441162162151,1686854759000000000] 1686935947.46s 2mb |----L0.377----| "
- - "L0.639[1686854819000000000,1686855311972972960] 1686935947.46s 3mb |--------L0.639---------| "
- - "L0.392[1686853570351351341,1686854441162162150] 1686936871.55s 3mb|------------------L0.392------------------| "
- - "L0.393[1686854441162162151,1686855311972972960] 1686936871.55s 3mb |------------------L0.393------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 37mb total:"
- - "L0 "
- - "L0.?[1686853570351351341,1686854963648648636] 1686936871.55s 29mb|--------------------------------L0.?---------------------------------| "
- - "L0.?[1686854963648648637,1686855311972972960] 1686936871.55s 7mb |-----L0.?------| "
- - "Committing partition 1:"
- - " Soft Deleting 7 files: L0.376, L0.377, L0.392, L0.393, L0.466, L0.467, L0.639"
- - " Creating 2 files"
- - "**** Simulation run 94, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686856536496842959]). 20 Input Files, 142mb total:"
- - "L0 "
- - "L0.566[1686855311972972961,1686856182783783770] 1686929421.02s 6mb|------------------L0.566------------------| "
- - "L0.567[1686856182783783771,1686857053594594580] 1686929421.02s 6mb |------------------L0.567------------------| "
- - "L0.454[1686855311972972961,1686856182783783770] 1686929712.33s 10mb|------------------L0.454------------------| "
- - "L0.455[1686856182783783771,1686857053594594580] 1686929712.33s 10mb |------------------L0.455------------------| "
- - "L0.662[1686855311972972961,1686856182783783770] 1686929965.33s 6mb|------------------L0.662------------------| "
- - "L0.663[1686856182783783771,1686857053594594580] 1686929965.33s 6mb |------------------L0.663------------------| "
- - "L0.524[1686855311972972961,1686856182783783770] 1686930563.07s 6mb|------------------L0.524------------------| "
- - "L0.525[1686856182783783771,1686857053594594580] 1686930563.07s 6mb |------------------L0.525------------------| "
- - "L0.596[1686855311972972961,1686856182783783770] 1686930780.95s 9mb|------------------L0.596------------------| "
- - "L0.597[1686856182783783771,1686857053594594580] 1686930780.95s 9mb |------------------L0.597------------------| "
- - "L0.552[1686855311972972961,1686856182783783770] 1686931336.08s 6mb|------------------L0.552------------------| "
- - "L0.553[1686856182783783771,1686857053594594580] 1686931336.08s 6mb |------------------L0.553------------------| "
- - "L0.482[1686855311972972961,1686856182783783770] 1686931600.58s 10mb|------------------L0.482------------------| "
- - "L0.483[1686856182783783771,1686857053594594580] 1686931600.58s 10mb |------------------L0.483------------------| "
- - "L0.205[1686855311972972961,1686855659000000000] 1686931893.7s 2mb|----L0.205-----| "
- - "L0.712[1686855719000000000,1686856182783783770] 1686931893.7s 3mb |-------L0.712--------| "
- - "L0.713[1686856182783783771,1686857053594594580] 1686931893.7s 6mb |------------------L0.713------------------| "
- - "L0.426[1686855311972972961,1686856182783783770] 1686932458.05s 10mb|------------------L0.426------------------| "
- - "L0.427[1686856182783783771,1686857053594594580] 1686932458.05s 10mb |------------------L0.427------------------| "
- - "L0.625[1686855311972972961,1686856182783783770] 1686932677.39s 5mb|------------------L0.625------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 142mb total:"
- - "L0 "
- - "L0.?[1686855311972972961,1686856536496842959] 1686932677.39s 100mb|----------------------------L0.?-----------------------------| "
- - "L0.?[1686856536496842960,1686857053594594580] 1686932677.39s 42mb |----------L0.?----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.205, L0.426, L0.427, L0.454, L0.455, L0.482, L0.483, L0.524, L0.525, L0.552, L0.553, L0.566, L0.567, L0.596, L0.597, L0.625, L0.662, L0.663, L0.712, L0.713"
- - " Creating 2 files"
- - "**** Simulation run 95, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686856564304278603]). 20 Input Files, 139mb total:"
- - "L0 "
- - "L0.626[1686856182783783771,1686857053594594580] 1686932677.39s 5mb |------------------L0.626------------------| "
- - "L0.538[1686855311972972961,1686856182783783770] 1686933271.57s 6mb|------------------L0.538------------------| "
- - "L0.539[1686856182783783771,1686857053594594580] 1686933271.57s 6mb |------------------L0.539------------------| "
- - "L0.510[1686855311972972961,1686856182783783770] 1686933528.17s 10mb|------------------L0.510------------------| "
- - "L0.511[1686856182783783771,1686857053594594580] 1686933528.17s 10mb |------------------L0.511------------------| "
- - "L0.679[1686855311972972961,1686856182783783770] 1686933830.06s 6mb|------------------L0.679------------------| "
- - "L0.680[1686856182783783771,1686857053594594580] 1686933830.06s 6mb |------------------L0.680------------------| "
- - "L0.442[1686855311972972961,1686856182783783770] 1686934254.96s 8mb|------------------L0.442------------------| "
- - "L0.443[1686856182783783771,1686857053594594580] 1686934254.96s 8mb |------------------L0.443------------------| "
- - "L0.610[1686855311972972961,1686856182783783770] 1686934759.75s 5mb|------------------L0.610------------------| "
- - "L0.611[1686856182783783771,1686857053594594580] 1686934759.75s 5mb |------------------L0.611------------------| "
- - "L0.496[1686855311972972961,1686856182783783770] 1686934966.48s 10mb|------------------L0.496------------------| "
- - "L0.497[1686856182783783771,1686857053594594580] 1686934966.48s 10mb |------------------L0.497------------------| "
- - "L0.333[1686855311972972961,1686855419000000000] 1686935151.54s 649kb|L0.333| "
- - "L0.695[1686855479000000000,1686856182783783770] 1686935151.54s 5mb |--------------L0.695--------------| "
- - "L0.696[1686856182783783771,1686857053594594580] 1686935151.54s 6mb |------------------L0.696------------------| "
- - "L0.581[1686855311972972961,1686856182783783770] 1686935546.05s 6mb|------------------L0.581------------------| "
- - "L0.582[1686856182783783771,1686857053594594580] 1686935546.05s 6mb |------------------L0.582------------------| "
- - "L0.468[1686855311972972961,1686856182783783770] 1686935742.51s 10mb|------------------L0.468------------------| "
- - "L0.469[1686856182783783771,1686857053594594580] 1686935742.51s 10mb |------------------L0.469------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 139mb total:"
- - "L0 "
- - "L0.?[1686855311972972961,1686856564304278603] 1686935742.51s 100mb|-----------------------------L0.?-----------------------------| "
- - "L0.?[1686856564304278604,1686857053594594580] 1686935742.51s 39mb |---------L0.?----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.333, L0.442, L0.443, L0.468, L0.469, L0.496, L0.497, L0.510, L0.511, L0.538, L0.539, L0.581, L0.582, L0.610, L0.611, L0.626, L0.679, L0.680, L0.695, L0.696"
- - " Creating 2 files"
- - "**** Simulation run 96, type=compact(ManySmallFiles). 4 Input Files, 17mb total:"
- - "L0 "
- - "L0.640[1686855311972972961,1686856182783783770] 1686935947.46s 6mb|------------------L0.640------------------| "
- - "L0.641[1686856182783783771,1686857053594594580] 1686935947.46s 6mb |------------------L0.641------------------| "
- - "L0.394[1686855311972972961,1686856182783783770] 1686936871.55s 3mb|------------------L0.394------------------| "
- - "L0.395[1686856182783783771,1686857053594594580] 1686936871.55s 3mb |------------------L0.395------------------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 17mb total:"
- - "L0, all files 17mb "
- - "L0.?[1686855311972972961,1686857053594594580] 1686936871.55s|------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L0.394, L0.395, L0.640, L0.641"
- - " Creating 1 files"
- - "**** Simulation run 97, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686858278525917085]). 20 Input Files, 142mb total:"
- - "L0 "
- - "L0.568[1686857053594594581,1686857924405405390] 1686929421.02s 6mb|------------------L0.568------------------| "
- - "L0.569[1686857924405405391,1686858795216216200] 1686929421.02s 6mb |------------------L0.569------------------| "
- - "L0.456[1686857053594594581,1686857924405405390] 1686929712.33s 10mb|------------------L0.456------------------| "
- - "L0.457[1686857924405405391,1686858795216216200] 1686929712.33s 10mb |------------------L0.457------------------| "
- - "L0.664[1686857053594594581,1686857924405405390] 1686929965.33s 6mb|------------------L0.664------------------| "
- - "L0.665[1686857924405405391,1686858795216216200] 1686929965.33s 6mb |------------------L0.665------------------| "
- - "L0.526[1686857053594594581,1686857924405405390] 1686930563.07s 6mb|------------------L0.526------------------| "
- - "L0.527[1686857924405405391,1686858795216216200] 1686930563.07s 6mb |------------------L0.527------------------| "
- - "L0.598[1686857053594594581,1686857924405405390] 1686930780.95s 9mb|------------------L0.598------------------| "
- - "L0.599[1686857924405405391,1686858795216216200] 1686930780.95s 9mb |------------------L0.599------------------| "
- - "L0.554[1686857053594594581,1686857924405405390] 1686931336.08s 6mb|------------------L0.554------------------| "
- - "L0.555[1686857924405405391,1686858795216216200] 1686931336.08s 6mb |------------------L0.555------------------| "
- - "L0.484[1686857053594594581,1686857924405405390] 1686931600.58s 10mb|------------------L0.484------------------| "
- - "L0.485[1686857924405405391,1686858795216216200] 1686931600.58s 10mb |------------------L0.485------------------| "
- - "L0.714[1686857053594594581,1686857924405405390] 1686931893.7s 6mb|------------------L0.714------------------| "
- - "L0.715[1686857924405405391,1686858795216216200] 1686931893.7s 6mb |------------------L0.715------------------| "
- - "L0.428[1686857053594594581,1686857924405405390] 1686932458.05s 10mb|------------------L0.428------------------| "
- - "L0.429[1686857924405405391,1686858119000000000] 1686932458.05s 2mb |-L0.429-| "
- - "L0.729[1686858179000000000,1686858795216216200] 1686932458.05s 7mb |-----------L0.729------------| "
- - "L0.627[1686857053594594581,1686857924405405390] 1686932677.39s 5mb|------------------L0.627------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 142mb total:"
- - "L0 "
- - "L0.?[1686857053594594581,1686858278525917085] 1686932677.39s 100mb|----------------------------L0.?-----------------------------| "
- - "L0.?[1686858278525917086,1686858795216216200] 1686932677.39s 42mb |----------L0.?----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.428, L0.429, L0.456, L0.457, L0.484, L0.485, L0.526, L0.527, L0.554, L0.555, L0.568, L0.569, L0.598, L0.599, L0.627, L0.664, L0.665, L0.714, L0.715, L0.729"
- - " Creating 2 files"
- - "**** Simulation run 98, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686858251288003855]). 20 Input Files, 145mb total:"
- - "L0 "
- - "L0.628[1686857924405405391,1686858795216216200] 1686932677.39s 5mb |------------------L0.628------------------| "
- - "L0.540[1686857053594594581,1686857924405405390] 1686933271.57s 6mb|------------------L0.540------------------| "
- - "L0.541[1686857924405405391,1686858795216216200] 1686933271.57s 6mb |------------------L0.541------------------| "
- - "L0.512[1686857053594594581,1686857924405405390] 1686933528.17s 10mb|------------------L0.512------------------| "
- - "L0.513[1686857924405405391,1686858795216216200] 1686933528.17s 10mb |------------------L0.513------------------| "
- - "L0.681[1686857053594594581,1686857924405405390] 1686933830.06s 6mb|------------------L0.681------------------| "
- - "L0.682[1686857924405405391,1686858795216216200] 1686933830.06s 6mb |------------------L0.682------------------| "
- - "L0.444[1686857053594594581,1686857924405405390] 1686934254.96s 8mb|------------------L0.444------------------| "
- - "L0.445[1686857924405405391,1686858795216216200] 1686934254.96s 8mb |------------------L0.445------------------| "
- - "L0.612[1686857053594594581,1686857924405405390] 1686934759.75s 5mb|------------------L0.612------------------| "
- - "L0.613[1686857924405405391,1686858795216216200] 1686934759.75s 5mb |------------------L0.613------------------| "
- - "L0.498[1686857053594594581,1686857924405405390] 1686934966.48s 10mb|------------------L0.498------------------| "
- - "L0.499[1686857924405405391,1686858795216216200] 1686934966.48s 10mb |------------------L0.499------------------| "
- - "L0.697[1686857053594594581,1686857924405405390] 1686935151.54s 6mb|------------------L0.697------------------| "
- - "L0.698[1686857924405405391,1686858795216216200] 1686935151.54s 6mb |------------------L0.698------------------| "
- - "L0.583[1686857053594594581,1686857924405405390] 1686935546.05s 6mb|------------------L0.583------------------| "
- - "L0.584[1686857924405405391,1686858795216216200] 1686935546.05s 6mb |------------------L0.584------------------| "
- - "L0.470[1686857053594594581,1686857924405405390] 1686935742.51s 10mb|------------------L0.470------------------| "
- - "L0.471[1686857924405405391,1686858795216216200] 1686935742.51s 10mb |------------------L0.471------------------| "
- - "L0.642[1686857053594594581,1686857924405405390] 1686935947.46s 6mb|------------------L0.642------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 145mb total:"
- - "L0 "
- - "L0.?[1686857053594594581,1686858251288003855] 1686935947.46s 100mb|---------------------------L0.?----------------------------| "
- - "L0.?[1686858251288003856,1686858795216216200] 1686935947.46s 45mb |-----------L0.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.444, L0.445, L0.470, L0.471, L0.498, L0.499, L0.512, L0.513, L0.540, L0.541, L0.583, L0.584, L0.612, L0.613, L0.628, L0.642, L0.681, L0.682, L0.697, L0.698"
- - " Creating 2 files"
- - "**** Simulation run 99, type=compact(ManySmallFiles). 3 Input Files, 11mb total:"
- - "L0 "
- - "L0.643[1686857924405405391,1686858795216216200] 1686935947.46s 6mb |------------------L0.643------------------| "
- - "L0.396[1686857053594594581,1686857924405405390] 1686936871.55s 3mb|------------------L0.396------------------| "
- - "L0.397[1686857924405405391,1686858795216216200] 1686936871.55s 3mb |------------------L0.397------------------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 11mb total:"
- - "L0, all files 11mb "
- - "L0.?[1686857053594594581,1686858795216216200] 1686936871.55s|------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L0.396, L0.397, L0.643"
- - " Creating 1 files"
- - "**** Simulation run 100, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686859969989511638]). 20 Input Files, 148mb total:"
- - "L0 "
- - "L0.570[1686858795216216201,1686859666027027010] 1686929421.02s 6mb|------------------L0.570------------------| "
- - "L0.571[1686859666027027011,1686860536837837820] 1686929421.02s 6mb |------------------L0.571------------------| "
- - "L0.458[1686858795216216201,1686859666027027010] 1686929712.33s 10mb|------------------L0.458------------------| "
- - "L0.459[1686859666027027011,1686860536837837820] 1686929712.33s 10mb |------------------L0.459------------------| "
- - "L0.666[1686858795216216201,1686859666027027010] 1686929965.33s 6mb|------------------L0.666------------------| "
- - "L0.667[1686859666027027011,1686860536837837820] 1686929965.33s 6mb |------------------L0.667------------------| "
- - "L0.528[1686858795216216201,1686859666027027010] 1686930563.07s 6mb|------------------L0.528------------------| "
- - "L0.529[1686859666027027011,1686860536837837820] 1686930563.07s 6mb |------------------L0.529------------------| "
- - "L0.600[1686858795216216201,1686859666027027010] 1686930780.95s 9mb|------------------L0.600------------------| "
- - "L0.601[1686859666027027011,1686860536837837820] 1686930780.95s 9mb |------------------L0.601------------------| "
- - "L0.556[1686858795216216201,1686859666027027010] 1686931336.08s 6mb|------------------L0.556------------------| "
- - "L0.557[1686859666027027011,1686860536837837820] 1686931336.08s 6mb |------------------L0.557------------------| "
- - "L0.486[1686858795216216201,1686859666027027010] 1686931600.58s 10mb|------------------L0.486------------------| "
- - "L0.487[1686859666027027011,1686860536837837820] 1686931600.58s 10mb |------------------L0.487------------------| "
- - "L0.716[1686858795216216201,1686859666027027010] 1686931893.7s 6mb|------------------L0.716------------------| "
- - "L0.717[1686859666027027011,1686860536837837820] 1686931893.7s 6mb |------------------L0.717------------------| "
- - "L0.730[1686858795216216201,1686859666027027010] 1686932458.05s 10mb|------------------L0.730------------------| "
- - "L0.731[1686859666027027011,1686860536837837820] 1686932458.05s 10mb |------------------L0.731------------------| "
- - "L0.629[1686858795216216201,1686859666027027010] 1686932677.39s 5mb|------------------L0.629------------------| "
- - "L0.630[1686859666027027011,1686860536837837820] 1686932677.39s 5mb |------------------L0.630------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 148mb total:"
- - "L0 "
- - "L0.?[1686858795216216201,1686859969989511638] 1686932677.39s 100mb|---------------------------L0.?---------------------------| "
- - "L0.?[1686859969989511639,1686860536837837820] 1686932677.39s 48mb |-----------L0.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.458, L0.459, L0.486, L0.487, L0.528, L0.529, L0.556, L0.557, L0.570, L0.571, L0.600, L0.601, L0.629, L0.630, L0.666, L0.667, L0.716, L0.717, L0.730, L0.731"
- - " Creating 2 files"
- - "**** Simulation run 101, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686859988431489230]). 20 Input Files, 146mb total:"
- - "L0 "
- - "L0.542[1686858795216216201,1686859666027027010] 1686933271.57s 6mb|------------------L0.542------------------| "
- - "L0.543[1686859666027027011,1686860536837837820] 1686933271.57s 6mb |------------------L0.543------------------| "
- - "L0.514[1686858795216216201,1686859666027027010] 1686933528.17s 10mb|------------------L0.514------------------| "
- - "L0.515[1686859666027027011,1686860536837837820] 1686933528.17s 10mb |------------------L0.515------------------| "
- - "L0.683[1686858795216216201,1686859666027027010] 1686933830.06s 6mb|------------------L0.683------------------| "
- - "L0.684[1686859666027027011,1686860536837837820] 1686933830.06s 6mb |------------------L0.684------------------| "
- - "L0.446[1686858795216216201,1686859666027027010] 1686934254.96s 8mb|------------------L0.446------------------| "
- - "L0.447[1686859666027027011,1686860536837837820] 1686934254.96s 8mb |------------------L0.447------------------| "
- - "L0.614[1686858795216216201,1686859666027027010] 1686934759.75s 5mb|------------------L0.614------------------| "
- - "L0.615[1686859666027027011,1686860536837837820] 1686934759.75s 5mb |------------------L0.615------------------| "
- - "L0.500[1686858795216216201,1686859666027027010] 1686934966.48s 10mb|------------------L0.500------------------| "
- - "L0.501[1686859666027027011,1686860536837837820] 1686934966.48s 10mb |------------------L0.501------------------| "
- - "L0.699[1686858795216216201,1686859666027027010] 1686935151.54s 6mb|------------------L0.699------------------| "
- - "L0.700[1686859666027027011,1686860536837837820] 1686935151.54s 6mb |------------------L0.700------------------| "
- - "L0.585[1686858795216216201,1686859666027027010] 1686935546.05s 6mb|------------------L0.585------------------| "
- - "L0.586[1686859666027027011,1686860536837837820] 1686935546.05s 6mb |------------------L0.586------------------| "
- - "L0.472[1686858795216216201,1686859666027027010] 1686935742.51s 10mb|------------------L0.472------------------| "
- - "L0.473[1686859666027027011,1686860536837837820] 1686935742.51s 10mb |------------------L0.473------------------| "
- - "L0.644[1686858795216216201,1686859666027027010] 1686935947.46s 6mb|------------------L0.644------------------| "
- - "L0.645[1686859666027027011,1686860536837837820] 1686935947.46s 6mb |------------------L0.645------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 146mb total:"
- - "L0 "
- - "L0.?[1686858795216216201,1686859988431489230] 1686935947.46s 100mb|---------------------------L0.?----------------------------| "
- - "L0.?[1686859988431489231,1686860536837837820] 1686935947.46s 46mb |-----------L0.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.446, L0.447, L0.472, L0.473, L0.500, L0.501, L0.514, L0.515, L0.542, L0.543, L0.585, L0.586, L0.614, L0.615, L0.644, L0.645, L0.683, L0.684, L0.699, L0.700"
- - " Creating 2 files"
- - "**** Simulation run 102, type=compact(ManySmallFiles). 2 Input Files, 6mb total:"
- - "L0, all files 3mb "
- - "L0.398[1686858795216216201,1686859666027027010] 1686936871.55s|------------------L0.398------------------| "
- - "L0.399[1686859666027027011,1686860536837837820] 1686936871.55s |------------------L0.399------------------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0, all files 6mb "
- - "L0.?[1686858795216216201,1686860536837837820] 1686936871.55s|------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L0.398, L0.399"
- - " Creating 1 files"
- - "**** Simulation run 103, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686861711611133258]). 20 Input Files, 148mb total:"
- - "L0 "
- - "L0.572[1686860536837837821,1686861407648648630] 1686929421.02s 6mb|------------------L0.572------------------| "
- - "L0.573[1686861407648648631,1686862278459459440] 1686929421.02s 6mb |------------------L0.573------------------| "
- - "L0.460[1686860536837837821,1686861407648648630] 1686929712.33s 10mb|------------------L0.460------------------| "
- - "L0.461[1686861407648648631,1686862278459459440] 1686929712.33s 10mb |------------------L0.461------------------| "
- - "L0.668[1686860536837837821,1686861407648648630] 1686929965.33s 6mb|------------------L0.668------------------| "
- - "L0.669[1686861407648648631,1686862278459459440] 1686929965.33s 6mb |------------------L0.669------------------| "
- - "L0.530[1686860536837837821,1686861407648648630] 1686930563.07s 6mb|------------------L0.530------------------| "
- - "L0.531[1686861407648648631,1686862278459459440] 1686930563.07s 6mb |------------------L0.531------------------| "
- - "L0.602[1686860536837837821,1686861407648648630] 1686930780.95s 9mb|------------------L0.602------------------| "
- - "L0.603[1686861407648648631,1686862278459459440] 1686930780.95s 9mb |------------------L0.603------------------| "
- - "L0.558[1686860536837837821,1686861407648648630] 1686931336.08s 6mb|------------------L0.558------------------| "
- - "L0.559[1686861407648648631,1686862278459459440] 1686931336.08s 6mb |------------------L0.559------------------| "
- - "L0.488[1686860536837837821,1686861407648648630] 1686931600.58s 10mb|------------------L0.488------------------| "
- - "L0.489[1686861407648648631,1686862278459459440] 1686931600.58s 10mb |------------------L0.489------------------| "
- - "L0.718[1686860536837837821,1686861407648648630] 1686931893.7s 6mb|------------------L0.718------------------| "
- - "L0.719[1686861407648648631,1686862278459459440] 1686931893.7s 6mb |------------------L0.719------------------| "
- - "L0.732[1686860536837837821,1686861407648648630] 1686932458.05s 10mb|------------------L0.732------------------| "
- - "L0.733[1686861407648648631,1686862278459459440] 1686932458.05s 10mb |------------------L0.733------------------| "
- - "L0.631[1686860536837837821,1686861407648648630] 1686932677.39s 5mb|------------------L0.631------------------| "
- - "L0.632[1686861407648648631,1686862278459459440] 1686932677.39s 5mb |------------------L0.632------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 148mb total:"
- - "L0 "
- - "L0.?[1686860536837837821,1686861711611133258] 1686932677.39s 100mb|---------------------------L0.?---------------------------| "
- - "L0.?[1686861711611133259,1686862278459459440] 1686932677.39s 48mb |-----------L0.?------------| "
- - "**** Simulation run 104, type=compact(ManySmallFiles). 2 Input Files, 6mb total:"
- - "L0, all files 3mb "
- - "L0.400[1686860536837837821,1686861407648648630] 1686936871.55s|------------------L0.400------------------| "
- - "L0.401[1686861407648648631,1686862278459459440] 1686936871.55s |------------------L0.401------------------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0, all files 6mb "
- - "L0.?[1686860536837837821,1686862278459459440] 1686936871.55s|------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L0.400, L0.401"
- - " Creating 1 files"
- - "**** Simulation run 105, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686863453232754878]). 20 Input Files, 148mb total:"
- - "L0 "
- - "L0.574[1686862278459459441,1686863149270270250] 1686929421.02s 6mb|------------------L0.574------------------| "
- - "L0.575[1686863149270270251,1686864020081081060] 1686929421.02s 6mb |------------------L0.575------------------| "
- - "L0.462[1686862278459459441,1686863149270270250] 1686929712.33s 10mb|------------------L0.462------------------| "
- - "L0.463[1686863149270270251,1686864020081081060] 1686929712.33s 10mb |------------------L0.463------------------| "
- - "L0.670[1686862278459459441,1686863149270270250] 1686929965.33s 6mb|------------------L0.670------------------| "
- - "L0.671[1686863149270270251,1686864020081081060] 1686929965.33s 6mb |------------------L0.671------------------| "
- - "L0.532[1686862278459459441,1686863149270270250] 1686930563.07s 6mb|------------------L0.532------------------| "
- - "L0.533[1686863149270270251,1686864020081081060] 1686930563.07s 6mb |------------------L0.533------------------| "
- - "L0.604[1686862278459459441,1686863149270270250] 1686930780.95s 9mb|------------------L0.604------------------| "
- - "L0.605[1686863149270270251,1686864020081081060] 1686930780.95s 9mb |------------------L0.605------------------| "
- - "L0.560[1686862278459459441,1686863149270270250] 1686931336.08s 6mb|------------------L0.560------------------| "
- - "L0.561[1686863149270270251,1686864020081081060] 1686931336.08s 6mb |------------------L0.561------------------| "
- - "L0.490[1686862278459459441,1686863149270270250] 1686931600.58s 10mb|------------------L0.490------------------| "
- - "L0.491[1686863149270270251,1686864020081081060] 1686931600.58s 10mb |------------------L0.491------------------| "
- - "L0.720[1686862278459459441,1686863149270270250] 1686931893.7s 6mb|------------------L0.720------------------| "
- - "L0.721[1686863149270270251,1686864020081081060] 1686931893.7s 6mb |------------------L0.721------------------| "
- - "L0.734[1686862278459459441,1686863149270270250] 1686932458.05s 10mb|------------------L0.734------------------| "
- - "L0.735[1686863149270270251,1686864020081081060] 1686932458.05s 10mb |------------------L0.735------------------| "
- - "L0.633[1686862278459459441,1686863149270270250] 1686932677.39s 5mb|------------------L0.633------------------| "
- - "L0.634[1686863149270270251,1686864020081081060] 1686932677.39s 5mb |------------------L0.634------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 148mb total:"
- - "L0 "
- - "L0.?[1686862278459459441,1686863453232754878] 1686932677.39s 100mb|---------------------------L0.?---------------------------| "
- - "L0.?[1686863453232754879,1686864020081081060] 1686932677.39s 48mb |-----------L0.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.462, L0.463, L0.490, L0.491, L0.532, L0.533, L0.560, L0.561, L0.574, L0.575, L0.604, L0.605, L0.633, L0.634, L0.670, L0.671, L0.720, L0.721, L0.734, L0.735"
- - " Creating 2 files"
- - "**** Simulation run 106, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686863528879205201]). 20 Input Files, 139mb total:"
- - "L0 "
- - "L0.546[1686862278459459441,1686863149270270250] 1686933271.57s 6mb|------------------L0.546------------------| "
- - "L0.547[1686863149270270251,1686864020081081060] 1686933271.57s 6mb |------------------L0.547------------------| "
- - "L0.518[1686862278459459441,1686863149270270250] 1686933528.17s 10mb|------------------L0.518------------------| "
- - "L0.519[1686863149270270251,1686864020081081060] 1686933528.17s 10mb |------------------L0.519------------------| "
- - "L0.687[1686862278459459441,1686863149270270250] 1686933830.06s 6mb|------------------L0.687------------------| "
- - "L0.688[1686863149270270251,1686864020081081060] 1686933830.06s 6mb |------------------L0.688------------------| "
- - "L0.450[1686862278459459441,1686862859000000000] 1686934254.96s 5mb|-----------L0.450-----------| "
- - "L0.745[1686862919000000000,1686863149270270250] 1686934254.96s 2mb |-L0.745--| "
- - "L0.746[1686863149270270251,1686864020081081060] 1686934254.96s 8mb |------------------L0.746------------------| "
- - "L0.618[1686862278459459441,1686863149270270250] 1686934759.75s 5mb|------------------L0.618------------------| "
- - "L0.619[1686863149270270251,1686864020081081060] 1686934759.75s 5mb |------------------L0.619------------------| "
- - "L0.504[1686862278459459441,1686863149270270250] 1686934966.48s 10mb|------------------L0.504------------------| "
- - "L0.505[1686863149270270251,1686864020081081060] 1686934966.48s 10mb |------------------L0.505------------------| "
- - "L0.703[1686862278459459441,1686863149270270250] 1686935151.54s 6mb|------------------L0.703------------------| "
- - "L0.704[1686863149270270251,1686864020081081060] 1686935151.54s 6mb |------------------L0.704------------------| "
- - "L0.589[1686862278459459441,1686863149270270250] 1686935546.05s 6mb|------------------L0.589------------------| "
- - "L0.590[1686863149270270251,1686864020081081060] 1686935546.05s 6mb |------------------L0.590------------------| "
- - "L0.476[1686862278459459441,1686863149270270250] 1686935742.51s 10mb|------------------L0.476------------------| "
- - "L0.477[1686863149270270251,1686864020081081060] 1686935742.51s 10mb |------------------L0.477------------------| "
- - "L0.648[1686862278459459441,1686863149270270250] 1686935947.46s 6mb|------------------L0.648------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 139mb total:"
- - "L0 "
- - "L0.?[1686862278459459441,1686863528879205201] 1686935947.46s 100mb|-----------------------------L0.?-----------------------------| "
- - "L0.?[1686863528879205202,1686864020081081060] 1686935947.46s 39mb |---------L0.?----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.450, L0.476, L0.477, L0.504, L0.505, L0.518, L0.519, L0.546, L0.547, L0.589, L0.590, L0.618, L0.619, L0.648, L0.687, L0.688, L0.703, L0.704, L0.745, L0.746"
- - " Creating 2 files"
- - "**** Simulation run 107, type=compact(ManySmallFiles). 3 Input Files, 11mb total:"
- - "L0 "
- - "L0.649[1686863149270270251,1686864020081081060] 1686935947.46s 6mb |------------------L0.649------------------| "
- - "L0.402[1686862278459459441,1686863149270270250] 1686936871.55s 3mb|------------------L0.402------------------| "
- - "L0.403[1686863149270270251,1686864020081081060] 1686936871.55s 3mb |------------------L0.403------------------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 11mb total:"
- - "L0, all files 11mb "
- - "L0.?[1686862278459459441,1686864020081081060] 1686936871.55s|------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L0.402, L0.403, L0.649"
- - " Creating 1 files"
- - "**** Simulation run 108, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686865413378378356]). 20 Input Files, 98mb total:"
- - "L0 "
- - "L0.576[1686864020081081061,1686864890891891870] 1686929421.02s 6mb|------------------L0.576------------------| "
- - "L0.577[1686864890891891871,1686865259000000000] 1686929421.02s 2mb |-----L0.577------| "
- - "L0.851[1686865319000000000,1686865761702702680] 1686929421.02s 3mb |-------L0.851-------| "
- - "L0.464[1686864020081081061,1686864359000000000] 1686929712.33s 4mb|----L0.464-----| "
- - "L0.764[1686864419000000000,1686864890891891870] 1686929712.33s 4mb |--------L0.764--------| "
- - "L0.765[1686864890891891871,1686865761702702680] 1686929712.33s 7mb |------------------L0.765------------------| "
- - "L0.672[1686864020081081061,1686864890891891870] 1686929965.33s 6mb|------------------L0.672------------------| "
- - "L0.673[1686864890891891871,1686865761702702680] 1686929965.33s 6mb |------------------L0.673------------------| "
- - "L0.534[1686864020081081061,1686864659000000000] 1686930563.07s 4mb|------------L0.534-------------| "
- - "L0.819[1686864719000000000,1686864890891891870] 1686930563.07s 1mb |L0.819| "
- - "L0.820[1686864890891891871,1686865761702702680] 1686930563.07s 5mb |------------------L0.820------------------| "
- - "L0.606[1686864020081081061,1686864890891891870] 1686930780.95s 9mb|------------------L0.606------------------| "
- - "L0.607[1686864890891891871,1686865439000000000] 1686930780.95s 6mb |----------L0.607----------| "
- - "L0.871[1686865499000000000,1686865761702702680] 1686930780.95s 3mb |--L0.871---| "
- - "L0.562[1686864020081081061,1686864839000000000] 1686931336.08s 6mb|-----------------L0.562-----------------| "
- - "L0.841[1686864899000000000,1686865761702702680] 1686931336.08s 5mb |------------------L0.841------------------| "
- - "L0.492[1686864020081081061,1686864419000000000] 1686931600.58s 5mb|------L0.492------| "
- - "L0.786[1686864479000000000,1686864890891891870] 1686931600.58s 4mb |------L0.786-------| "
- - "L0.787[1686864890891891871,1686865761702702680] 1686931600.58s 8mb |------------------L0.787------------------| "
- - "L0.722[1686864020081081061,1686864890891891870] 1686931893.7s 6mb|------------------L0.722------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 98mb total:"
- - "L0 "
- - "L0.?[1686864020081081061,1686865413378378356] 1686931893.7s 78mb|--------------------------------L0.?---------------------------------| "
- - "L0.?[1686865413378378357,1686865761702702680] 1686931893.7s 20mb |-----L0.?------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.464, L0.492, L0.534, L0.562, L0.576, L0.577, L0.606, L0.607, L0.672, L0.673, L0.722, L0.764, L0.765, L0.786, L0.787, L0.819, L0.820, L0.841, L0.851, L0.871"
- - " Creating 2 files"
- - "**** Simulation run 109, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686865413378378356]). 20 Input Files, 117mb total:"
- - "L0 "
- - "L0.723[1686864890891891871,1686865761702702680] 1686931893.7s 6mb |------------------L0.723------------------| "
- - "L0.736[1686864020081081061,1686864890891891870] 1686932458.05s 10mb|------------------L0.736------------------| "
- - "L0.737[1686864890891891871,1686865761702702680] 1686932458.05s 10mb |------------------L0.737------------------| "
- - "L0.635[1686864020081081061,1686864890891891870] 1686932677.39s 5mb|------------------L0.635------------------| "
- - "L0.636[1686864890891891871,1686865761702702680] 1686932677.39s 5mb |------------------L0.636------------------| "
- - "L0.548[1686864020081081061,1686864659000000000] 1686933271.57s 4mb|------------L0.548-------------| "
- - "L0.830[1686864719000000000,1686864890891891870] 1686933271.57s 1mb |L0.830| "
- - "L0.831[1686864890891891871,1686865761702702680] 1686933271.57s 5mb |------------------L0.831------------------| "
- - "L0.520[1686864020081081061,1686864599000000000] 1686933528.17s 7mb|----------L0.520-----------| "
- - "L0.808[1686864659000000000,1686864890891891870] 1686933528.17s 2mb |-L0.808--| "
- - "L0.809[1686864890891891871,1686865761702702680] 1686933528.17s 7mb |------------------L0.809------------------| "
- - "L0.689[1686864020081081061,1686864890891891870] 1686933830.06s 6mb|------------------L0.689------------------| "
- - "L0.690[1686864890891891871,1686865761702702680] 1686933830.06s 6mb |------------------L0.690------------------| "
- - "L0.747[1686864020081081061,1686864890891891870] 1686934254.96s 8mb|------------------L0.747------------------| "
- - "L0.748[1686864890891891871,1686865761702702680] 1686934254.96s 8mb |------------------L0.748------------------| "
- - "L0.620[1686864020081081061,1686864890891891870] 1686934759.75s 5mb|------------------L0.620------------------| "
- - "L0.621[1686864890891891871,1686865761702702680] 1686934759.75s 5mb |------------------L0.621------------------| "
- - "L0.506[1686864020081081061,1686864419000000000] 1686934966.48s 5mb|------L0.506------| "
- - "L0.797[1686864479000000000,1686864890891891870] 1686934966.48s 3mb |------L0.797-------| "
- - "L0.798[1686864890891891871,1686865761702702680] 1686934966.48s 7mb |------------------L0.798------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 117mb total:"
- - "L0 "
- - "L0.?[1686864020081081061,1686865413378378356] 1686934966.48s 94mb|--------------------------------L0.?---------------------------------| "
- - "L0.?[1686865413378378357,1686865761702702680] 1686934966.48s 23mb |-----L0.?------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.506, L0.520, L0.548, L0.620, L0.621, L0.635, L0.636, L0.689, L0.690, L0.723, L0.736, L0.737, L0.747, L0.748, L0.797, L0.798, L0.808, L0.809, L0.830, L0.831"
- - " Creating 2 files"
- - "**** Simulation run 110, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686865413378378356]). 12 Input Files, 55mb total:"
- - "L0 "
- - "L0.705[1686864020081081061,1686864890891891870] 1686935151.54s 6mb|------------------L0.705------------------| "
- - "L0.706[1686864890891891871,1686865761702702680] 1686935151.54s 6mb |------------------L0.706------------------| "
- - "L0.591[1686864020081081061,1686864890891891870] 1686935546.05s 6mb|------------------L0.591------------------| "
- - "L0.592[1686864890891891871,1686865259000000000] 1686935546.05s 2mb |-----L0.592------| "
- - "L0.861[1686865319000000000,1686865761702702680] 1686935546.05s 3mb |-------L0.861-------| "
- - "L0.478[1686864020081081061,1686864359000000000] 1686935742.51s 4mb|----L0.478-----| "
- - "L0.775[1686864419000000000,1686864890891891870] 1686935742.51s 4mb |--------L0.775--------| "
- - "L0.776[1686864890891891871,1686865761702702680] 1686935742.51s 7mb |------------------L0.776------------------| "
- - "L0.650[1686864020081081061,1686864890891891870] 1686935947.46s 6mb|------------------L0.650------------------| "
- - "L0.651[1686864890891891871,1686865761702702680] 1686935947.46s 6mb |------------------L0.651------------------| "
- - "L0.404[1686864020081081061,1686864890891891870] 1686936871.55s 3mb|------------------L0.404------------------| "
- - "L0.405[1686864890891891871,1686865761702702680] 1686936871.55s 3mb |------------------L0.405------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 55mb total:"
- - "L0 "
- - "L0.?[1686864020081081061,1686865413378378356] 1686936871.55s 44mb|--------------------------------L0.?---------------------------------| "
- - "L0.?[1686865413378378357,1686865761702702680] 1686936871.55s 11mb |-----L0.?------| "
- - "Committing partition 1:"
- - " Soft Deleting 12 files: L0.404, L0.405, L0.478, L0.591, L0.592, L0.650, L0.651, L0.705, L0.706, L0.775, L0.776, L0.861"
- - " Creating 2 files"
- - "**** Simulation run 111, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686867209822490496]). 20 Input Files, 120mb total:"
- - "L0 "
- - "L0.852[1686865761702702681,1686866632513513490] 1686929421.02s 5mb|------------------L0.852------------------| "
- - "L0.853[1686866632513513491,1686867503324324300] 1686929421.02s 5mb |------------------L0.853------------------| "
- - "L0.766[1686865761702702681,1686866632513513490] 1686929712.33s 7mb|------------------L0.766------------------| "
- - "L0.767[1686866632513513491,1686867503324324300] 1686929712.33s 7mb |------------------L0.767------------------| "
- - "L0.674[1686865761702702681,1686866632513513490] 1686929965.33s 6mb|------------------L0.674------------------| "
- - "L0.675[1686866632513513491,1686867503324324300] 1686929965.33s 6mb |------------------L0.675------------------| "
- - "L0.821[1686865761702702681,1686866632513513490] 1686930563.07s 5mb|------------------L0.821------------------| "
- - "L0.822[1686866632513513491,1686867503324324300] 1686930563.07s 5mb |------------------L0.822------------------| "
- - "L0.872[1686865761702702681,1686866632513513490] 1686930780.95s 9mb|------------------L0.872------------------| "
- - "L0.873[1686866632513513491,1686867503324324300] 1686930780.95s 9mb |------------------L0.873------------------| "
- - "L0.842[1686865761702702681,1686866632513513490] 1686931336.08s 5mb|------------------L0.842------------------| "
- - "L0.843[1686866632513513491,1686867503324324300] 1686931336.08s 5mb |------------------L0.843------------------| "
- - "L0.788[1686865761702702681,1686866632513513490] 1686931600.58s 8mb|------------------L0.788------------------| "
- - "L0.789[1686866632513513491,1686867503324324300] 1686931600.58s 8mb |------------------L0.789------------------| "
- - "L0.724[1686865761702702681,1686866632513513490] 1686931893.7s 6mb|------------------L0.724------------------| "
- - "L0.725[1686866632513513491,1686867503324324300] 1686931893.7s 6mb |------------------L0.725------------------| "
- - "L0.738[1686865761702702681,1686865979000000000] 1686932458.05s 3mb|-L0.738--| "
- - "L0.637[1686865761702702681,1686866632513513490] 1686932677.39s 5mb|------------------L0.637------------------| "
- - "L0.638[1686866632513513491,1686867059000000000] 1686932677.39s 3mb |-------L0.638-------| "
- - "L0.890[1686867119000000000,1686867503324324300] 1686932677.39s 6mb |-----L0.890------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 120mb total:"
- - "L0 "
- - "L0.?[1686865761702702681,1686867209822490496] 1686932677.39s 100mb|----------------------------------L0.?----------------------------------| "
- - "L0.?[1686867209822490497,1686867503324324300] 1686932677.39s 20mb |----L0.?-----| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.637, L0.638, L0.674, L0.675, L0.724, L0.725, L0.738, L0.766, L0.767, L0.788, L0.789, L0.821, L0.822, L0.842, L0.843, L0.852, L0.853, L0.872, L0.873, L0.890"
- - " Creating 2 files"
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.460, L0.461, L0.488, L0.489, L0.530, L0.531, L0.558, L0.559, L0.572, L0.573, L0.602, L0.603, L0.631, L0.632, L0.668, L0.669, L0.718, L0.719, L0.732, L0.733"
- - " Creating 2 files"
- - "**** Simulation run 112, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686861730053110850]). 20 Input Files, 146mb total:"
- - "L0 "
- - "L0.544[1686860536837837821,1686861407648648630] 1686933271.57s 6mb|------------------L0.544------------------| "
- - "L0.545[1686861407648648631,1686862278459459440] 1686933271.57s 6mb |------------------L0.545------------------| "
- - "L0.516[1686860536837837821,1686861407648648630] 1686933528.17s 10mb|------------------L0.516------------------| "
- - "L0.517[1686861407648648631,1686862278459459440] 1686933528.17s 10mb |------------------L0.517------------------| "
- - "L0.685[1686860536837837821,1686861407648648630] 1686933830.06s 6mb|------------------L0.685------------------| "
- - "L0.686[1686861407648648631,1686862278459459440] 1686933830.06s 6mb |------------------L0.686------------------| "
- - "L0.448[1686860536837837821,1686861407648648630] 1686934254.96s 8mb|------------------L0.448------------------| "
- - "L0.449[1686861407648648631,1686862278459459440] 1686934254.96s 8mb |------------------L0.449------------------| "
- - "L0.616[1686860536837837821,1686861407648648630] 1686934759.75s 5mb|------------------L0.616------------------| "
- - "L0.617[1686861407648648631,1686862278459459440] 1686934759.75s 5mb |------------------L0.617------------------| "
- - "L0.502[1686860536837837821,1686861407648648630] 1686934966.48s 10mb|------------------L0.502------------------| "
- - "L0.503[1686861407648648631,1686862278459459440] 1686934966.48s 10mb |------------------L0.503------------------| "
- - "L0.701[1686860536837837821,1686861407648648630] 1686935151.54s 6mb|------------------L0.701------------------| "
- - "L0.702[1686861407648648631,1686862278459459440] 1686935151.54s 6mb |------------------L0.702------------------| "
- - "L0.587[1686860536837837821,1686861407648648630] 1686935546.05s 6mb|------------------L0.587------------------| "
- - "L0.588[1686861407648648631,1686862278459459440] 1686935546.05s 6mb |------------------L0.588------------------| "
- - "L0.474[1686860536837837821,1686861407648648630] 1686935742.51s 10mb|------------------L0.474------------------| "
- - "L0.475[1686861407648648631,1686862278459459440] 1686935742.51s 10mb |------------------L0.475------------------| "
- - "L0.646[1686860536837837821,1686861407648648630] 1686935947.46s 6mb|------------------L0.646------------------| "
- - "L0.647[1686861407648648631,1686862278459459440] 1686935947.46s 6mb |------------------L0.647------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 146mb total:"
- - "L0 "
- - "L0.?[1686860536837837821,1686861730053110850] 1686935947.46s 100mb|---------------------------L0.?----------------------------| "
- - "L0.?[1686861730053110851,1686862278459459440] 1686935947.46s 46mb |-----------L0.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.448, L0.449, L0.474, L0.475, L0.502, L0.503, L0.516, L0.517, L0.544, L0.545, L0.587, L0.588, L0.616, L0.617, L0.646, L0.647, L0.685, L0.686, L0.701, L0.702"
- - " Creating 2 files"
- - "**** Simulation run 113, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686867154999999976]). 20 Input Files, 119mb total:"
- - "L0 "
- - "L0.832[1686865761702702681,1686866632513513490] 1686933271.57s 5mb|------------------L0.832------------------| "
- - "L0.833[1686866632513513491,1686867503324324300] 1686933271.57s 5mb |------------------L0.833------------------| "
- - "L0.810[1686865761702702681,1686866632513513490] 1686933528.17s 7mb|------------------L0.810------------------| "
- - "L0.811[1686866632513513491,1686867503324324300] 1686933528.17s 7mb |------------------L0.811------------------| "
- - "L0.691[1686865761702702681,1686866632513513490] 1686933830.06s 6mb|------------------L0.691------------------| "
- - "L0.692[1686866632513513491,1686867503324324300] 1686933830.06s 6mb |------------------L0.692------------------| "
- - "L0.749[1686865761702702681,1686866632513513490] 1686934254.96s 8mb|------------------L0.749------------------| "
- - "L0.750[1686866632513513491,1686867503324324300] 1686934254.96s 8mb |------------------L0.750------------------| "
- - "L0.622[1686865761702702681,1686866039000000000] 1686934759.75s 2mb|---L0.622---| "
- - "L0.881[1686866099000000000,1686866632513513490] 1686934759.75s 3mb |---------L0.881----------| "
- - "L0.882[1686866632513513491,1686867503324324300] 1686934759.75s 5mb |------------------L0.882------------------| "
- - "L0.799[1686865761702702681,1686866632513513490] 1686934966.48s 7mb|------------------L0.799------------------| "
- - "L0.800[1686866632513513491,1686867503324324300] 1686934966.48s 7mb |------------------L0.800------------------| "
- - "L0.707[1686865761702702681,1686866632513513490] 1686935151.54s 6mb|------------------L0.707------------------| "
- - "L0.708[1686866632513513491,1686867503324324300] 1686935151.54s 6mb |------------------L0.708------------------| "
- - "L0.862[1686865761702702681,1686866632513513490] 1686935546.05s 5mb|------------------L0.862------------------| "
- - "L0.863[1686866632513513491,1686867503324324300] 1686935546.05s 5mb |------------------L0.863------------------| "
- - "L0.777[1686865761702702681,1686866632513513490] 1686935742.51s 7mb|------------------L0.777------------------| "
- - "L0.778[1686866632513513491,1686867503324324300] 1686935742.51s 7mb |------------------L0.778------------------| "
- - "L0.652[1686865761702702681,1686866632513513490] 1686935947.46s 6mb|------------------L0.652------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 119mb total:"
- - "L0 "
- - "L0.?[1686865761702702681,1686867154999999976] 1686935947.46s 95mb|--------------------------------L0.?---------------------------------| "
- - "L0.?[1686867154999999977,1686867503324324300] 1686935947.46s 24mb |-----L0.?------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.622, L0.652, L0.691, L0.692, L0.707, L0.708, L0.749, L0.750, L0.777, L0.778, L0.799, L0.800, L0.810, L0.811, L0.832, L0.833, L0.862, L0.863, L0.881, L0.882"
- - " Creating 2 files"
- - "**** Simulation run 114, type=compact(ManySmallFiles). 3 Input Files, 11mb total:"
- - "L0 "
- - "L0.653[1686866632513513491,1686867503324324300] 1686935947.46s 6mb |------------------L0.653------------------| "
- - "L0.406[1686865761702702681,1686866632513513490] 1686936871.55s 3mb|------------------L0.406------------------| "
- - "L0.407[1686866632513513491,1686867503324324300] 1686936871.55s 3mb |------------------L0.407------------------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 11mb total:"
- - "L0, all files 11mb "
- - "L0.?[1686865761702702681,1686867503324324300] 1686936871.55s|------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L0.406, L0.407, L0.653"
- - " Creating 1 files"
- - "**** Simulation run 115, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686868747944483205]). 20 Input Files, 140mb total:"
- - "L0 "
- - "L0.854[1686867503324324301,1686868374135135110] 1686929421.02s 5mb|------------------L0.854------------------| "
- - "L0.855[1686868374135135111,1686869244945945920] 1686929421.02s 5mb |------------------L0.855------------------| "
- - "L0.768[1686867503324324301,1686868374135135110] 1686929712.33s 7mb|------------------L0.768------------------| "
- - "L0.769[1686868374135135111,1686869244945945920] 1686929712.33s 7mb |------------------L0.769------------------| "
- - "L0.676[1686867503324324301,1686868374135135110] 1686929965.33s 6mb|------------------L0.676------------------| "
- - "L0.677[1686868374135135111,1686868739000000000] 1686929965.33s 2mb |-----L0.677-----| "
- - "L0.911[1686868799000000000,1686869244945945920] 1686929965.33s 5mb |-------L0.911--------| "
- - "L0.823[1686867503324324301,1686868374135135110] 1686930563.07s 5mb|------------------L0.823------------------| "
- - "L0.824[1686868374135135111,1686869244945945920] 1686930563.07s 5mb |------------------L0.824------------------| "
- - "L0.874[1686867503324324301,1686868374135135110] 1686930780.95s 9mb|------------------L0.874------------------| "
- - "L0.875[1686868374135135111,1686869244945945920] 1686930780.95s 9mb |------------------L0.875------------------| "
- - "L0.844[1686867503324324301,1686868374135135110] 1686931336.08s 5mb|------------------L0.844------------------| "
- - "L0.845[1686868374135135111,1686869244945945920] 1686931336.08s 5mb |------------------L0.845------------------| "
- - "L0.790[1686867503324324301,1686868374135135110] 1686931600.58s 8mb|------------------L0.790------------------| "
- - "L0.791[1686868374135135111,1686869244945945920] 1686931600.58s 8mb |------------------L0.791------------------| "
- - "L0.726[1686867503324324301,1686868374135135110] 1686931893.7s 6mb|------------------L0.726------------------| "
- - "L0.727[1686868374135135111,1686869244945945920] 1686931893.7s 6mb |------------------L0.727------------------| "
- - "L0.891[1686867503324324301,1686868374135135110] 1686932677.39s 15mb|------------------L0.891------------------| "
- - "L0.892[1686868374135135111,1686869244945945920] 1686932677.39s 15mb |------------------L0.892------------------| "
- - "L0.834[1686867503324324301,1686868374135135110] 1686933271.57s 5mb|------------------L0.834------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 140mb total:"
- - "L0 "
- - "L0.?[1686867503324324301,1686868747944483205] 1686933271.57s 100mb|-----------------------------L0.?-----------------------------| "
- - "L0.?[1686868747944483206,1686869244945945920] 1686933271.57s 40mb |---------L0.?----------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.676, L0.677, L0.726, L0.727, L0.768, L0.769, L0.790, L0.791, L0.823, L0.824, L0.834, L0.844, L0.845, L0.854, L0.855, L0.874, L0.875, L0.891, L0.892, L0.911"
- - " Creating 2 files"
- - "**** Simulation run 116, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686868896621621596]). 20 Input Files, 116mb total:"
- - "L0 "
- - "L0.835[1686868374135135111,1686869244945945920] 1686933271.57s 5mb |------------------L0.835------------------| "
- - "L0.812[1686867503324324301,1686868374135135110] 1686933528.17s 7mb|------------------L0.812------------------| "
- - "L0.813[1686868374135135111,1686869244945945920] 1686933528.17s 7mb |------------------L0.813------------------| "
- - "L0.693[1686867503324324301,1686868374135135110] 1686933830.06s 6mb|------------------L0.693------------------| "
- - "L0.694[1686868374135135111,1686868859000000000] 1686933830.06s 3mb |--------L0.694---------| "
- - "L0.917[1686868919000000000,1686869244945945920] 1686933830.06s 4mb |----L0.917----| "
- - "L0.751[1686867503324324301,1686868374135135110] 1686934254.96s 8mb|------------------L0.751------------------| "
- - "L0.752[1686868374135135111,1686869244945945920] 1686934254.96s 8mb |------------------L0.752------------------| "
- - "L0.883[1686867503324324301,1686868374135135110] 1686934759.75s 5mb|------------------L0.883------------------| "
- - "L0.884[1686868374135135111,1686869244945945920] 1686934759.75s 5mb |------------------L0.884------------------| "
- - "L0.801[1686867503324324301,1686868374135135110] 1686934966.48s 7mb|------------------L0.801------------------| "
- - "L0.802[1686868374135135111,1686869244945945920] 1686934966.48s 7mb |------------------L0.802------------------| "
- - "L0.709[1686867503324324301,1686868374135135110] 1686935151.54s 6mb|------------------L0.709------------------| "
- - "L0.710[1686868374135135111,1686869244945945920] 1686935151.54s 6mb |------------------L0.710------------------| "
- - "L0.864[1686867503324324301,1686868374135135110] 1686935546.05s 5mb|------------------L0.864------------------| "
- - "L0.865[1686868374135135111,1686869244945945920] 1686935546.05s 5mb |------------------L0.865------------------| "
- - "L0.779[1686867503324324301,1686868374135135110] 1686935742.51s 7mb|------------------L0.779------------------| "
- - "L0.780[1686868374135135111,1686869244945945920] 1686935742.51s 7mb |------------------L0.780------------------| "
- - "L0.654[1686867503324324301,1686868199000000000] 1686935947.46s 5mb|-------------L0.654--------------| "
- - "L0.898[1686868259000000000,1686868374135135110] 1686935947.46s 1mb |L0.898| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 116mb total:"
- - "L0 "
- - "L0.?[1686867503324324301,1686868896621621596] 1686935947.46s 93mb|--------------------------------L0.?---------------------------------| "
- - "L0.?[1686868896621621597,1686869244945945920] 1686935947.46s 23mb |-----L0.?------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.654, L0.693, L0.694, L0.709, L0.710, L0.751, L0.752, L0.779, L0.780, L0.801, L0.802, L0.812, L0.813, L0.835, L0.864, L0.865, L0.883, L0.884, L0.898, L0.917"
- - " Creating 2 files"
- - "**** Simulation run 117, type=compact(ManySmallFiles). 3 Input Files, 17mb total:"
- - "L0 "
- - "L0.899[1686868374135135111,1686869244945945920] 1686935947.46s 11mb |------------------L0.899------------------| "
- - "L0.408[1686867503324324301,1686868374135135110] 1686936871.55s 3mb|------------------L0.408------------------| "
- - "L0.409[1686868374135135111,1686869244945945920] 1686936871.55s 3mb |------------------L0.409------------------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 17mb total:"
- - "L0, all files 17mb "
- - "L0.?[1686867503324324301,1686869244945945920] 1686936871.55s|------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L0.408, L0.409, L0.899"
- - " Creating 1 files"
- - "**** Simulation run 118, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686869890068506247]). 20 Input Files, 135mb total:"
- - "L0 "
- - "L0.856[1686869244945945921,1686870115756756730] 1686929421.02s 5mb|-----------------------------------------L0.856-----------------------------------------|"
- - "L0.770[1686869244945945921,1686870115756756730] 1686929712.33s 7mb|-----------------------------------------L0.770-----------------------------------------|"
- - "L0.912[1686869244945945921,1686870115756756730] 1686929965.33s 11mb|-----------------------------------------L0.912-----------------------------------------|"
- - "L0.825[1686869244945945921,1686870115756756730] 1686930563.07s 5mb|-----------------------------------------L0.825-----------------------------------------|"
- - "L0.876[1686869244945945921,1686870115756756730] 1686930780.95s 9mb|-----------------------------------------L0.876-----------------------------------------|"
- - "L0.846[1686869244945945921,1686870115756756730] 1686931336.08s 5mb|-----------------------------------------L0.846-----------------------------------------|"
- - "L0.792[1686869244945945921,1686870115756756730] 1686931600.58s 8mb|-----------------------------------------L0.792-----------------------------------------|"
- - "L0.728[1686869244945945921,1686869939000000000] 1686931893.7s 5mb|-------------------------------L0.728--------------------------------| "
- - "L0.928[1686869999000000000,1686870115756756730] 1686931893.7s 1mb |--L0.928--| "
- - "L0.893[1686869244945945921,1686870115756756730] 1686932677.39s 15mb|-----------------------------------------L0.893-----------------------------------------|"
- - "L0.836[1686869244945945921,1686870115756756730] 1686933271.57s 5mb|-----------------------------------------L0.836-----------------------------------------|"
- - "L0.814[1686869244945945921,1686870115756756730] 1686933528.17s 7mb|-----------------------------------------L0.814-----------------------------------------|"
- - "L0.918[1686869244945945921,1686870115756756730] 1686933830.06s 11mb|-----------------------------------------L0.918-----------------------------------------|"
- - "L0.753[1686869244945945921,1686870115756756730] 1686934254.96s 8mb|-----------------------------------------L0.753-----------------------------------------|"
- - "L0.885[1686869244945945921,1686870115756756730] 1686934759.75s 5mb|-----------------------------------------L0.885-----------------------------------------|"
- - "L0.803[1686869244945945921,1686870115756756730] 1686934966.48s 7mb|-----------------------------------------L0.803-----------------------------------------|"
- - "L0.711[1686869244945945921,1686869519000000000] 1686935151.54s 2mb|----------L0.711----------| "
- - "L0.923[1686869579000000000,1686870115756756730] 1686935151.54s 6mb |-----------------------L0.923------------------------| "
- - "L0.866[1686869244945945921,1686870115756756730] 1686935546.05s 5mb|-----------------------------------------L0.866-----------------------------------------|"
- - "L0.781[1686869244945945921,1686870115756756730] 1686935742.51s 7mb|-----------------------------------------L0.781-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 135mb total:"
- - "L0 "
- - "L0.?[1686869244945945921,1686869890068506247] 1686935742.51s 100mb|------------------------------L0.?------------------------------| "
- - "L0.?[1686869890068506248,1686870115756756730] 1686935742.51s 35mb |--------L0.?---------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.711, L0.728, L0.753, L0.770, L0.781, L0.792, L0.803, L0.814, L0.825, L0.836, L0.846, L0.856, L0.866, L0.876, L0.885, L0.893, L0.912, L0.918, L0.923, L0.928"
- - " Creating 2 files"
- - "**** Simulation run 119, type=compact(ManySmallFiles). 2 Input Files, 14mb total:"
- - "L0 "
- - "L0.900[1686869244945945921,1686870115756756730] 1686935947.46s 11mb|-----------------------------------------L0.900-----------------------------------------|"
- - "L0.410[1686869244945945921,1686870115756756730] 1686936871.55s 3mb|-----------------------------------------L0.410-----------------------------------------|"
- - "**** 1 Output Files (parquet_file_id not yet assigned), 14mb total:"
- - "L0, all files 14mb "
- - "L0.?[1686869244945945921,1686870115756756730] 1686936871.55s|------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L0.410, L0.900"
- - " Creating 1 files"
- - "**** Simulation run 120, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686870676199779489]). 20 Input Files, 155mb total:"
- - "L0 "
- - "L0.857[1686870115756756731,1686870986567567540] 1686929421.02s 5mb|-----------------------------------------L0.857-----------------------------------------|"
- - "L0.771[1686870115756756731,1686870986567567540] 1686929712.33s 7mb|-----------------------------------------L0.771-----------------------------------------|"
- - "L0.913[1686870115756756731,1686870986567567540] 1686929965.33s 11mb|-----------------------------------------L0.913-----------------------------------------|"
- - "L0.826[1686870115756756731,1686870986567567540] 1686930563.07s 5mb|-----------------------------------------L0.826-----------------------------------------|"
- - "L0.877[1686870115756756731,1686870986567567540] 1686930780.95s 9mb|-----------------------------------------L0.877-----------------------------------------|"
- - "L0.847[1686870115756756731,1686870986567567540] 1686931336.08s 5mb|-----------------------------------------L0.847-----------------------------------------|"
- - "L0.793[1686870115756756731,1686870986567567540] 1686931600.58s 8mb|-----------------------------------------L0.793-----------------------------------------|"
- - "L0.929[1686870115756756731,1686870986567567540] 1686931893.7s 11mb|-----------------------------------------L0.929-----------------------------------------|"
- - "L0.894[1686870115756756731,1686870986567567540] 1686932677.39s 15mb|-----------------------------------------L0.894-----------------------------------------|"
- - "L0.837[1686870115756756731,1686870986567567540] 1686933271.57s 5mb|-----------------------------------------L0.837-----------------------------------------|"
- - "L0.815[1686870115756756731,1686870986567567540] 1686933528.17s 7mb|-----------------------------------------L0.815-----------------------------------------|"
- - "L0.919[1686870115756756731,1686870986567567540] 1686933830.06s 11mb|-----------------------------------------L0.919-----------------------------------------|"
- - "L0.754[1686870115756756731,1686870986567567540] 1686934254.96s 8mb|-----------------------------------------L0.754-----------------------------------------|"
- - "L0.886[1686870115756756731,1686870986567567540] 1686934759.75s 5mb|-----------------------------------------L0.886-----------------------------------------|"
- - "L0.804[1686870115756756731,1686870986567567540] 1686934966.48s 7mb|-----------------------------------------L0.804-----------------------------------------|"
- - "L0.924[1686870115756756731,1686870986567567540] 1686935151.54s 11mb|-----------------------------------------L0.924-----------------------------------------|"
- - "L0.867[1686870115756756731,1686870986567567540] 1686935546.05s 5mb|-----------------------------------------L0.867-----------------------------------------|"
- - "L0.782[1686870115756756731,1686870986567567540] 1686935742.51s 7mb|-----------------------------------------L0.782-----------------------------------------|"
- - "L0.901[1686870115756756731,1686870986567567540] 1686935947.46s 11mb|-----------------------------------------L0.901-----------------------------------------|"
- - "L0.411[1686870115756756731,1686870986567567540] 1686936871.55s 3mb|-----------------------------------------L0.411-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 155mb total:"
- - "L0 "
- - "L0.?[1686870115756756731,1686870676199779489] 1686936871.55s 100mb|-------------------------L0.?--------------------------| "
- - "L0.?[1686870676199779490,1686870986567567540] 1686936871.55s 55mb |-------------L0.?-------------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.411, L0.754, L0.771, L0.782, L0.793, L0.804, L0.815, L0.826, L0.837, L0.847, L0.857, L0.867, L0.877, L0.886, L0.894, L0.901, L0.913, L0.919, L0.924, L0.929"
- - " Creating 2 files"
- - "**** Simulation run 121, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686871550514515284]). 20 Input Files, 154mb total:"
- - "L0 "
- - "L0.858[1686870986567567541,1686871857378378350] 1686929421.02s 5mb|-----------------------------------------L0.858-----------------------------------------|"
- - "L0.772[1686870986567567541,1686871857378378350] 1686929712.33s 7mb|-----------------------------------------L0.772-----------------------------------------|"
- - "L0.914[1686870986567567541,1686871857378378350] 1686929965.33s 11mb|-----------------------------------------L0.914-----------------------------------------|"
- - "L0.827[1686870986567567541,1686871857378378350] 1686930563.07s 5mb|-----------------------------------------L0.827-----------------------------------------|"
- - "L0.878[1686870986567567541,1686871857378378350] 1686930780.95s 9mb|-----------------------------------------L0.878-----------------------------------------|"
- - "L0.848[1686870986567567541,1686871857378378350] 1686931336.08s 5mb|-----------------------------------------L0.848-----------------------------------------|"
- - "L0.794[1686870986567567541,1686871857378378350] 1686931600.58s 8mb|-----------------------------------------L0.794-----------------------------------------|"
- - "L0.930[1686870986567567541,1686871857378378350] 1686931893.7s 11mb|-----------------------------------------L0.930-----------------------------------------|"
- - "L0.895[1686870986567567541,1686871857378378350] 1686932677.39s 15mb|-----------------------------------------L0.895-----------------------------------------|"
- - "L0.838[1686870986567567541,1686871857378378350] 1686933271.57s 5mb|-----------------------------------------L0.838-----------------------------------------|"
- - "L0.816[1686870986567567541,1686871857378378350] 1686933528.17s 7mb|-----------------------------------------L0.816-----------------------------------------|"
- - "L0.920[1686870986567567541,1686871857378378350] 1686933830.06s 11mb|-----------------------------------------L0.920-----------------------------------------|"
- - "L0.755[1686870986567567541,1686871857378378350] 1686934254.96s 8mb|-----------------------------------------L0.755-----------------------------------------|"
- - "L0.887[1686870986567567541,1686871857378378350] 1686934759.75s 5mb|-----------------------------------------L0.887-----------------------------------------|"
- - "L0.805[1686870986567567541,1686871857378378350] 1686934966.48s 7mb|-----------------------------------------L0.805-----------------------------------------|"
- - "L0.925[1686870986567567541,1686871857378378350] 1686935151.54s 11mb|-----------------------------------------L0.925-----------------------------------------|"
- - "L0.868[1686870986567567541,1686871857378378350] 1686935546.05s 5mb|-----------------------------------------L0.868-----------------------------------------|"
- - "L0.783[1686870986567567541,1686871857378378350] 1686935742.51s 7mb|-----------------------------------------L0.783-----------------------------------------|"
- - "L0.902[1686870986567567541,1686871857378378350] 1686935947.46s 11mb|-----------------------------------------L0.902-----------------------------------------|"
- - "L0.412[1686870986567567541,1686871559000000000] 1686936871.55s 2mb|-------------------------L0.412--------------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 154mb total:"
- - "L0 "
- - "L0.?[1686870986567567541,1686871550514515284] 1686936871.55s 100mb|--------------------------L0.?--------------------------| "
- - "L0.?[1686871550514515285,1686871857378378350] 1686936871.55s 54mb |------------L0.?-------------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.412, L0.755, L0.772, L0.783, L0.794, L0.805, L0.816, L0.827, L0.838, L0.848, L0.858, L0.868, L0.878, L0.887, L0.895, L0.902, L0.914, L0.920, L0.925, L0.930"
- - " Creating 2 files"
- - "**** Simulation run 122, type=compact(ManySmallFiles). 1 Input Files, 1mb total:"
- - "L0, all files 1mb "
- - "L0.933[1686871619000000000,1686871857378378350] 1686936871.55s|-----------------------------------------L0.933-----------------------------------------|"
- - "**** 1 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L0, all files 1mb "
- - "L0.?[1686871619000000000,1686871857378378350] 1686936871.55s|------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 1 files: L0.933"
- - " Creating 1 files"
- - "**** Simulation run 123, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686872413821229216]). 20 Input Files, 156mb total:"
- - "L0 "
- - "L0.859[1686871857378378351,1686872728189189160] 1686929421.02s 5mb|-----------------------------------------L0.859-----------------------------------------|"
- - "L0.773[1686871857378378351,1686872728189189160] 1686929712.33s 7mb|-----------------------------------------L0.773-----------------------------------------|"
- - "L0.915[1686871857378378351,1686872728189189160] 1686929965.33s 11mb|-----------------------------------------L0.915-----------------------------------------|"
- - "L0.828[1686871857378378351,1686872728189189160] 1686930563.07s 5mb|-----------------------------------------L0.828-----------------------------------------|"
- - "L0.879[1686871857378378351,1686872728189189160] 1686930780.95s 9mb|-----------------------------------------L0.879-----------------------------------------|"
- - "L0.849[1686871857378378351,1686872728189189160] 1686931336.08s 5mb|-----------------------------------------L0.849-----------------------------------------|"
- - "L0.795[1686871857378378351,1686872728189189160] 1686931600.58s 8mb|-----------------------------------------L0.795-----------------------------------------|"
- - "L0.931[1686871857378378351,1686872728189189160] 1686931893.7s 11mb|-----------------------------------------L0.931-----------------------------------------|"
- - "L0.896[1686871857378378351,1686872728189189160] 1686932677.39s 15mb|-----------------------------------------L0.896-----------------------------------------|"
- - "L0.839[1686871857378378351,1686872728189189160] 1686933271.57s 5mb|-----------------------------------------L0.839-----------------------------------------|"
- - "L0.817[1686871857378378351,1686872728189189160] 1686933528.17s 7mb|-----------------------------------------L0.817-----------------------------------------|"
- - "L0.921[1686871857378378351,1686872728189189160] 1686933830.06s 11mb|-----------------------------------------L0.921-----------------------------------------|"
- - "L0.756[1686871857378378351,1686872728189189160] 1686934254.96s 8mb|-----------------------------------------L0.756-----------------------------------------|"
- - "L0.888[1686871857378378351,1686872728189189160] 1686934759.75s 5mb|-----------------------------------------L0.888-----------------------------------------|"
- - "L0.806[1686871857378378351,1686872728189189160] 1686934966.48s 7mb|-----------------------------------------L0.806-----------------------------------------|"
- - "L0.926[1686871857378378351,1686872728189189160] 1686935151.54s 11mb|-----------------------------------------L0.926-----------------------------------------|"
- - "L0.869[1686871857378378351,1686872728189189160] 1686935546.05s 5mb|-----------------------------------------L0.869-----------------------------------------|"
- - "L0.784[1686871857378378351,1686872728189189160] 1686935742.51s 7mb|-----------------------------------------L0.784-----------------------------------------|"
- - "L0.903[1686871857378378351,1686872728189189160] 1686935947.46s 11mb|-----------------------------------------L0.903-----------------------------------------|"
- - "L0.934[1686871857378378351,1686872728189189160] 1686936871.55s 4mb|-----------------------------------------L0.934-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 156mb total:"
- - "L0 "
- - "L0.?[1686871857378378351,1686872413821229216] 1686936871.55s 100mb|-------------------------L0.?--------------------------| "
- - "L0.?[1686872413821229217,1686872728189189160] 1686936871.55s 56mb |-------------L0.?-------------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.756, L0.773, L0.784, L0.795, L0.806, L0.817, L0.828, L0.839, L0.849, L0.859, L0.869, L0.879, L0.888, L0.896, L0.903, L0.915, L0.921, L0.926, L0.931, L0.934"
- - " Creating 2 files"
- - "**** Simulation run 124, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686873284631629744]). 20 Input Files, 156mb total:"
- - "L0 "
- - "L0.860[1686872728189189161,1686873599000000000] 1686929421.02s 5mb|----------------------------------------L0.860-----------------------------------------| "
- - "L0.774[1686872728189189161,1686873599000000000] 1686929712.33s 7mb|----------------------------------------L0.774-----------------------------------------| "
- - "L0.916[1686872728189189161,1686873599000000000] 1686929965.33s 11mb|----------------------------------------L0.916-----------------------------------------| "
- - "L0.829[1686872728189189161,1686873599000000000] 1686930563.07s 5mb|----------------------------------------L0.829-----------------------------------------| "
- - "L0.880[1686872728189189161,1686873599000000000] 1686930780.95s 9mb|----------------------------------------L0.880-----------------------------------------| "
- - "L0.850[1686872728189189161,1686873599000000000] 1686931336.08s 5mb|----------------------------------------L0.850-----------------------------------------| "
- - "L0.796[1686872728189189161,1686873599000000000] 1686931600.58s 8mb|----------------------------------------L0.796-----------------------------------------| "
- - "L0.932[1686872728189189161,1686873599000000000] 1686931893.7s 11mb|----------------------------------------L0.932-----------------------------------------| "
- - "L0.897[1686872728189189161,1686873599000000000] 1686932677.39s 15mb|----------------------------------------L0.897-----------------------------------------| "
- - "L0.840[1686872728189189161,1686873599000000000] 1686933271.57s 5mb|----------------------------------------L0.840-----------------------------------------| "
- - "L0.818[1686872728189189161,1686873599000000000] 1686933528.17s 7mb|----------------------------------------L0.818-----------------------------------------| "
- - "L0.922[1686872728189189161,1686873599000000000] 1686933830.06s 11mb|----------------------------------------L0.922-----------------------------------------| "
- - "L0.757[1686872728189189161,1686873599000000000] 1686934254.96s 8mb|----------------------------------------L0.757-----------------------------------------| "
- - "L0.889[1686872728189189161,1686873599000000000] 1686934759.75s 5mb|----------------------------------------L0.889-----------------------------------------| "
- - "L0.807[1686872728189189161,1686873599000000000] 1686934966.48s 7mb|----------------------------------------L0.807-----------------------------------------| "
- - "L0.927[1686872728189189161,1686873599000000000] 1686935151.54s 11mb|----------------------------------------L0.927-----------------------------------------| "
- - "L0.870[1686872728189189161,1686873599000000000] 1686935546.05s 5mb|----------------------------------------L0.870-----------------------------------------| "
- - "L0.785[1686872728189189161,1686873599000000000] 1686935742.51s 7mb|----------------------------------------L0.785-----------------------------------------| "
- - "L0.904[1686872728189189161,1686873599000000000] 1686935947.46s 11mb|----------------------------------------L0.904-----------------------------------------| "
- - "L0.935[1686872728189189161,1686873599000000000] 1686936871.55s 4mb|----------------------------------------L0.935-----------------------------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 156mb total:"
- - "L0 "
- - "L0.?[1686872728189189161,1686873284631629744] 1686936871.55s 100mb|-------------------------L0.?--------------------------| "
- - "L0.?[1686873284631629745,1686873599000000000] 1686936871.55s 56mb |-------------L0.?-------------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.757, L0.774, L0.785, L0.796, L0.807, L0.818, L0.829, L0.840, L0.850, L0.860, L0.870, L0.880, L0.889, L0.897, L0.904, L0.916, L0.922, L0.927, L0.932, L0.935"
- - " Creating 2 files"
- - "**** Simulation run 125, type=split(HighL0OverlapTotalBacklog)(split_times=[1686855892513513500]). 1 Input Files, 1mb total:"
- - "L1, all files 1mb "
- - "L1.656[1686855311972972961,1686856182783783770] 1686928854.57s|-----------------------------------------L1.656-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L1 "
- - "L1.?[1686855311972972961,1686855892513513500] 1686928854.57s 746kb|--------------------------L1.?---------------------------| "
- - "L1.?[1686855892513513501,1686856182783783770] 1686928854.57s 373kb |-----------L1.?------------| "
- - "**** Simulation run 126, type=split(HighL0OverlapTotalBacklog)(split_times=[1686855892513513500, 1686856473054054039]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.978[1686855311972972961,1686856536496842959] 1686932677.39s|-----------------------------------------L0.978-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686855311972972961,1686855892513513500] 1686932677.39s 47mb|------------------L0.?------------------| "
- - "L0.?[1686855892513513501,1686856473054054039] 1686932677.39s 47mb |------------------L0.?------------------| "
- - "L0.?[1686856473054054040,1686856536496842959] 1686932677.39s 5mb |L0.?|"
- - "**** Simulation run 127, type=split(HighL0OverlapTotalBacklog)(split_times=[1686855892513513500, 1686856473054054039]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.980[1686855311972972961,1686856564304278603] 1686935742.51s|-----------------------------------------L0.980-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686855311972972961,1686855892513513500] 1686935742.51s 46mb|-----------------L0.?------------------| "
- - "L0.?[1686855892513513501,1686856473054054039] 1686935742.51s 46mb |-----------------L0.?------------------| "
- - "L0.?[1686856473054054040,1686856564304278603] 1686935742.51s 7mb |L0.?| "
- - "**** Simulation run 128, type=split(HighL0OverlapTotalBacklog)(split_times=[1686855892513513500, 1686856473054054039]). 1 Input Files, 17mb total:"
- - "L0, all files 17mb "
- - "L0.982[1686855311972972961,1686857053594594580] 1686936871.55s|-----------------------------------------L0.982-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 17mb total:"
- - "L0 "
- - "L0.?[1686855311972972961,1686855892513513500] 1686936871.55s 6mb|-----------L0.?------------| "
- - "L0.?[1686855892513513501,1686856473054054039] 1686936871.55s 6mb |-----------L0.?------------| "
- - "L0.?[1686856473054054040,1686857053594594580] 1686936871.55s 6mb |------------L0.?------------| "
- - "**** Simulation run 129, type=split(HighL0OverlapTotalBacklog)(split_times=[1686856473054054039]). 1 Input Files, 1mb total:"
- - "L1, all files 1mb "
- - "L1.657[1686856182783783771,1686857053594594580] 1686928854.57s|-----------------------------------------L1.657-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L1 "
- - "L1.?[1686856182783783771,1686856473054054039] 1686928854.57s 373kb|-----------L1.?------------| "
- - "L1.?[1686856473054054040,1686857053594594580] 1686928854.57s 746kb |---------------------------L1.?---------------------------| "
- - "**** Simulation run 130, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857634135135120]). 1 Input Files, 1mb total:"
- - "L1, all files 1mb "
- - "L1.658[1686857053594594581,1686857924405405390] 1686928854.57s|-----------------------------------------L1.658-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L1 "
- - "L1.?[1686857053594594581,1686857634135135120] 1686928854.57s 746kb|--------------------------L1.?---------------------------| "
- - "L1.?[1686857634135135121,1686857924405405390] 1686928854.57s 373kb |-----------L1.?------------| "
- - "**** Simulation run 131, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857634135135120, 1686858214675675659]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.983[1686857053594594581,1686858278525917085] 1686932677.39s|-----------------------------------------L0.983-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686857053594594581,1686857634135135120] 1686932677.39s 47mb|------------------L0.?------------------| "
- - "L0.?[1686857634135135121,1686858214675675659] 1686932677.39s 47mb |------------------L0.?------------------| "
- - "L0.?[1686858214675675660,1686858278525917085] 1686932677.39s 5mb |L0.?|"
- - "**** Simulation run 132, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857634135135120, 1686858214675675659]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.985[1686857053594594581,1686858251288003855] 1686935947.46s|-----------------------------------------L0.985-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686857053594594581,1686857634135135120] 1686935947.46s 48mb|------------------L0.?-------------------| "
- - "L0.?[1686857634135135121,1686858214675675659] 1686935947.46s 48mb |------------------L0.?-------------------| "
- - "L0.?[1686858214675675660,1686858251288003855] 1686935947.46s 3mb |L0.?|"
- - "**** Simulation run 133, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857634135135120, 1686858214675675659]). 1 Input Files, 11mb total:"
- - "L0, all files 11mb "
- - "L0.987[1686857053594594581,1686858795216216200] 1686936871.55s|-----------------------------------------L0.987-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 11mb total:"
- - "L0 "
- - "L0.?[1686857053594594581,1686857634135135120] 1686936871.55s 4mb|-----------L0.?------------| "
- - "L0.?[1686857634135135121,1686858214675675659] 1686936871.55s 4mb |-----------L0.?------------| "
- - "L0.?[1686858214675675660,1686858795216216200] 1686936871.55s 4mb |------------L0.?------------| "
- - "**** Simulation run 134, type=split(HighL0OverlapTotalBacklog)(split_times=[1686858214675675659]). 1 Input Files, 1mb total:"
- - "L1, all files 1mb "
- - "L1.659[1686857924405405391,1686858795216216200] 1686928854.57s|-----------------------------------------L1.659-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L1 "
- - "L1.?[1686857924405405391,1686858214675675659] 1686928854.57s 373kb|-----------L1.?------------| "
- - "L1.?[1686858214675675660,1686858795216216200] 1686928854.57s 746kb |---------------------------L1.?---------------------------| "
- - "**** Simulation run 135, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859375756756740, 1686859956297297279]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.988[1686858795216216201,1686859969989511638] 1686932677.39s|-----------------------------------------L0.988-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686858795216216201,1686859375756756740] 1686932677.39s 49mb|-------------------L0.?-------------------| "
- - "L0.?[1686859375756756741,1686859956297297279] 1686932677.39s 49mb |-------------------L0.?-------------------| "
- - "L0.?[1686859956297297280,1686859969989511638] 1686932677.39s 1mb |L0.?|"
- - "**** Simulation run 136, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859375756756740, 1686859956297297279]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.990[1686858795216216201,1686859988431489230] 1686935947.46s|-----------------------------------------L0.990-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686858795216216201,1686859375756756740] 1686935947.46s 49mb|------------------L0.?-------------------| "
- - "L0.?[1686859375756756741,1686859956297297279] 1686935947.46s 49mb |------------------L0.?-------------------| "
- - "L0.?[1686859956297297280,1686859988431489230] 1686935947.46s 3mb |L0.?|"
- - "**** Simulation run 137, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859375756756740, 1686859956297297279]). 1 Input Files, 6mb total:"
- - "L0, all files 6mb "
- - "L0.992[1686858795216216201,1686860536837837820] 1686936871.55s|-----------------------------------------L0.992-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0 "
- - "L0.?[1686858795216216201,1686859375756756740] 1686936871.55s 2mb|-----------L0.?------------| "
- - "L0.?[1686859375756756741,1686859956297297279] 1686936871.55s 2mb |-----------L0.?------------| "
- - "L0.?[1686859956297297280,1686860536837837820] 1686936871.55s 2mb |------------L0.?------------| "
- - "**** Simulation run 138, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859375756756740]). 1 Input Files, 579kb total:"
- - "L1, all files 579kb "
- - "L1.53[1686859079000000000,1686859499000000000] 1686928854.57s|-----------------------------------------L1.53------------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 579kb total:"
- - "L1 "
- - "L1.?[1686859079000000000,1686859375756756740] 1686928854.57s 409kb|----------------------------L1.?-----------------------------| "
- - "L1.?[1686859375756756741,1686859499000000000] 1686928854.57s 170kb |----------L1.?----------| "
- - "**** Simulation run 139, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859956297297279]). 1 Input Files, 1mb total:"
- - "L1, all files 1mb "
- - "L1.740[1686859666027027011,1686860536837837820] 1686928854.57s|-----------------------------------------L1.740-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L1 "
- - "L1.?[1686859666027027011,1686859956297297279] 1686928854.57s 371kb|-----------L1.?------------| "
- - "L1.?[1686859956297297280,1686860536837837820] 1686928854.57s 743kb |---------------------------L1.?---------------------------| "
- - "**** Simulation run 140, type=split(HighL0OverlapTotalBacklog)(split_times=[1686861117378378360]). 1 Input Files, 1mb total:"
- - "L1, all files 1mb "
- - "L1.741[1686860536837837821,1686861407648648630] 1686928854.57s|-----------------------------------------L1.741-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L1 "
- - "L1.?[1686860536837837821,1686861117378378360] 1686928854.57s 743kb|--------------------------L1.?---------------------------| "
- - "L1.?[1686861117378378361,1686861407648648630] 1686928854.57s 371kb |-----------L1.?------------| "
- - "**** Simulation run 141, type=split(HighL0OverlapTotalBacklog)(split_times=[1686861117378378360, 1686861697918918899]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1007[1686860536837837821,1686861711611133258] 1686932677.39s|----------------------------------------L0.1007-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686860536837837821,1686861117378378360] 1686932677.39s 49mb|-------------------L0.?-------------------| "
- - "L0.?[1686861117378378361,1686861697918918899] 1686932677.39s 49mb |-------------------L0.?-------------------| "
- - "L0.?[1686861697918918900,1686861711611133258] 1686932677.39s 1mb |L0.?|"
- - "**** Simulation run 142, type=split(HighL0OverlapTotalBacklog)(split_times=[1686861117378378360, 1686861697918918899]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1009[1686860536837837821,1686861730053110850] 1686935947.46s|----------------------------------------L0.1009-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686860536837837821,1686861117378378360] 1686935947.46s 49mb|------------------L0.?-------------------| "
- - "L0.?[1686861117378378361,1686861697918918899] 1686935947.46s 49mb |------------------L0.?-------------------| "
- - "L0.?[1686861697918918900,1686861730053110850] 1686935947.46s 3mb |L0.?|"
- - "**** Simulation run 143, type=split(HighL0OverlapTotalBacklog)(split_times=[1686861117378378360, 1686861697918918899]). 1 Input Files, 6mb total:"
- - "L0, all files 6mb "
- - "L0.993[1686860536837837821,1686862278459459440] 1686936871.55s|-----------------------------------------L0.993-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0 "
- - "L0.?[1686860536837837821,1686861117378378360] 1686936871.55s 2mb|-----------L0.?------------| "
- - "L0.?[1686861117378378361,1686861697918918899] 1686936871.55s 2mb |-----------L0.?------------| "
- - "L0.?[1686861697918918900,1686862278459459440] 1686936871.55s 2mb |------------L0.?------------| "
- - "**** Simulation run 144, type=split(HighL0OverlapTotalBacklog)(split_times=[1686862858999999980, 1686863439540540519]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.994[1686862278459459441,1686863453232754878] 1686932677.39s|-----------------------------------------L0.994-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686862278459459441,1686862858999999980] 1686932677.39s 49mb|-------------------L0.?-------------------| "
- - "L0.?[1686862858999999981,1686863439540540519] 1686932677.39s 49mb |-------------------L0.?-------------------| "
- - "L0.?[1686863439540540520,1686863453232754878] 1686932677.39s 1mb |L0.?|"
- - "**** Simulation run 145, type=split(HighL0OverlapTotalBacklog)(split_times=[1686862858999999980, 1686863439540540519]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.996[1686862278459459441,1686863528879205201] 1686935947.46s|-----------------------------------------L0.996-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686862278459459441,1686862858999999980] 1686935947.46s 46mb|-----------------L0.?------------------| "
- - "L0.?[1686862858999999981,1686863439540540519] 1686935947.46s 46mb |-----------------L0.?------------------| "
- - "L0.?[1686863439540540520,1686863528879205201] 1686935947.46s 7mb |L0.?| "
- - "**** Simulation run 146, type=split(HighL0OverlapTotalBacklog)(split_times=[1686862858999999980, 1686863439540540519]). 1 Input Files, 11mb total:"
- - "L0, all files 11mb "
- - "L0.998[1686862278459459441,1686864020081081060] 1686936871.55s|-----------------------------------------L0.998-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 11mb total:"
- - "L0 "
- - "L0.?[1686862278459459441,1686862858999999980] 1686936871.55s 4mb|-----------L0.?------------| "
- - "L0.?[1686862858999999981,1686863439540540519] 1686936871.55s 4mb |-----------L0.?------------| "
- - "L0.?[1686863439540540520,1686864020081081060] 1686936871.55s 4mb |------------L0.?------------| "
- - "**** Simulation run 147, type=split(HighL0OverlapTotalBacklog)(split_times=[1686863439540540519]). 1 Input Files, 703kb total:"
- - "L1, all files 703kb "
- - "L1.744[1686863149270270251,1686863699000000000] 1686928854.57s|-----------------------------------------L1.744-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 703kb total:"
- - "L1 "
- - "L1.?[1686863149270270251,1686863439540540519] 1686928854.57s 371kb|--------------------L1.?---------------------| "
- - "L1.?[1686863439540540520,1686863699000000000] 1686928854.57s 332kb |------------------L1.?------------------| "
- - "**** Simulation run 148, type=split(HighL0OverlapTotalBacklog)(split_times=[1686861697918918899]). 1 Input Files, 1mb total:"
- - "L1, all files 1mb "
- - "L1.742[1686861407648648631,1686862278459459440] 1686928854.57s|-----------------------------------------L1.742-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L1 "
- - "L1.?[1686861407648648631,1686861697918918899] 1686928854.57s 371kb|-----------L1.?------------| "
- - "L1.?[1686861697918918900,1686862278459459440] 1686928854.57s 743kb |---------------------------L1.?---------------------------| "
- - "**** Simulation run 149, type=split(HighL0OverlapTotalBacklog)(split_times=[1686862858999999980]). 1 Input Files, 1mb total:"
- - "L1, all files 1mb "
- - "L1.743[1686862278459459441,1686863149270270250] 1686928854.57s|-----------------------------------------L1.743-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L1 "
- - "L1.?[1686862278459459441,1686862858999999980] 1686928854.57s 743kb|--------------------------L1.?---------------------------| "
- - "L1.?[1686862858999999981,1686863149270270250] 1686928854.57s 371kb |-----------L1.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 25 files: L1.53, L1.656, L1.657, L1.658, L1.659, L1.740, L1.741, L1.742, L1.743, L1.744, L0.978, L0.980, L0.982, L0.983, L0.985, L0.987, L0.988, L0.990, L0.992, L0.993, L0.994, L0.996, L0.998, L0.1007, L0.1009"
- - " Creating 65 files"
- - "**** Simulation run 150, type=split(ReduceOverlap)(split_times=[1686852699540540530]). 1 Input Files, 82mb total:"
- - "L0, all files 82mb "
- - "L0.966[1686851828729729721,1686853222027027016] 1686931600.58s|-----------------------------------------L0.966-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 82mb total:"
- - "L0 "
- - "L0.?[1686851828729729721,1686852699540540530] 1686931600.58s 51mb|-------------------------L0.?-------------------------| "
- - "L0.?[1686852699540540531,1686853222027027016] 1686931600.58s 31mb |-------------L0.?--------------| "
- - "**** Simulation run 151, type=split(ReduceOverlap)(split_times=[1686864890891891870]). 1 Input Files, 78mb total:"
- - "L0, all files 78mb "
- - "L0.999[1686864020081081061,1686865413378378356] 1686931893.7s|-----------------------------------------L0.999-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 78mb total:"
- - "L0 "
- - "L0.?[1686864020081081061,1686864890891891870] 1686931893.7s 49mb|-------------------------L0.?-------------------------| "
- - "L0.?[1686864890891891871,1686865413378378356] 1686931893.7s 29mb |-------------L0.?--------------| "
- - "**** Simulation run 152, type=split(ReduceOverlap)(split_times=[1686842249810810810]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.936[1686841379000000000,1686842589641148927] 1686932677.39s|-----------------------------------------L0.936-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686932677.39s 72mb|-----------------------------L0.?-----------------------------| "
- - "L0.?[1686842249810810811,1686842589641148927] 1686932677.39s 28mb |---------L0.?----------| "
- - "**** Simulation run 153, type=split(ReduceOverlap)(split_times=[1686843991432432430]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.941[1686843120621621621,1686844331262770547] 1686932677.39s|-----------------------------------------L0.941-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686843120621621621,1686843991432432430] 1686932677.39s 72mb|-----------------------------L0.?-----------------------------| "
- - "L0.?[1686843991432432431,1686844331262770547] 1686932677.39s 28mb |---------L0.?----------| "
- - "**** Simulation run 154, type=split(ReduceOverlap)(split_times=[1686845579000000000, 1686845733054054050]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.946[1686844862243243241,1686846072884392167] 1686932677.39s|-----------------------------------------L0.946-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686844862243243241,1686845579000000000] 1686932677.39s 59mb|-----------------------L0.?------------------------| "
- - "L0.?[1686845579000000001,1686845733054054050] 1686932677.39s 13mb |--L0.?---| "
- - "L0.?[1686845733054054051,1686846072884392167] 1686932677.39s 28mb |---------L0.?----------| "
- - "**** Simulation run 155, type=split(ReduceOverlap)(split_times=[1686847474675675670]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.950[1686846603864864861,1686847814506013787] 1686932677.39s|-----------------------------------------L0.950-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686846603864864861,1686847474675675670] 1686932677.39s 72mb|-----------------------------L0.?-----------------------------| "
- - "L0.?[1686847474675675671,1686847814506013787] 1686932677.39s 28mb |---------L0.?----------| "
- - "**** Simulation run 156, type=split(ReduceOverlap)(split_times=[1686849216297297290]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.955[1686848345486486481,1686849600239388909] 1686932677.39s|-----------------------------------------L0.955-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686848345486486481,1686849216297297290] 1686932677.39s 69mb|----------------------------L0.?----------------------------| "
- - "L0.?[1686849216297297291,1686849600239388909] 1686932677.39s 31mb |----------L0.?-----------| "
- - "**** Simulation run 157, type=split(ReduceOverlap)(split_times=[1686849779000000000]). 1 Input Files, 39mb total:"
- - "L0, all files 39mb "
- - "L0.956[1686849600239388910,1686850087108108100] 1686932677.39s|-----------------------------------------L0.956-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 39mb total:"
- - "L0 "
- - "L0.?[1686849600239388910,1686849779000000000] 1686932677.39s 14mb|-------------L0.?--------------| "
- - "L0.?[1686849779000000001,1686850087108108100] 1686932677.39s 25mb |-------------------------L0.?-------------------------| "
- - "**** Simulation run 158, type=split(ReduceOverlap)(split_times=[1686850559000000000, 1686850957918918910]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.960[1686850087108108101,1686851297766913590] 1686932677.39s|-----------------------------------------L0.960-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686850087108108101,1686850559000000000] 1686932677.39s 39mb|--------------L0.?---------------| "
- - "L0.?[1686850559000000001,1686850957918918910] 1686932677.39s 33mb |-----------L0.?------------| "
- - "L0.?[1686850957918918911,1686851297766913590] 1686932677.39s 28mb |---------L0.?----------| "
- - "**** Simulation run 159, type=split(ReduceOverlap)(split_times=[1686854441162162150, 1686854819000000000]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.972[1686853570351351341,1686854830189955965] 1686932677.39s|-----------------------------------------L0.972-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686853570351351341,1686854441162162150] 1686932677.39s 69mb|----------------------------L0.?----------------------------| "
- - "L0.?[1686854441162162151,1686854819000000000] 1686932677.39s 30mb |----------L0.?----------| "
- - "L0.?[1686854819000000001,1686854830189955965] 1686932677.39s 910kb |L0.?|"
- - "**** Simulation run 160, type=split(ReduceOverlap)(split_times=[1686856182783783770]). 1 Input Files, 47mb total:"
- - "L0, all files 47mb "
- - "L0.1034[1686855892513513501,1686856473054054039] 1686932677.39s|----------------------------------------L0.1034-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 47mb total:"
- - "L0, all files 24mb "
- - "L0.?[1686855892513513501,1686856182783783770] 1686932677.39s|-------------------L0.?--------------------| "
- - "L0.?[1686856182783783771,1686856473054054039] 1686932677.39s |-------------------L0.?-------------------| "
- - "**** Simulation run 161, type=split(ReduceOverlap)(split_times=[1686857924405405390]). 1 Input Files, 47mb total:"
- - "L0, all files 47mb "
- - "L0.1047[1686857634135135121,1686858214675675659] 1686932677.39s|----------------------------------------L0.1047-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 47mb total:"
- - "L0, all files 24mb "
- - "L0.?[1686857634135135121,1686857924405405390] 1686932677.39s|-------------------L0.?--------------------| "
- - "L0.?[1686857924405405391,1686858214675675659] 1686932677.39s |-------------------L0.?-------------------| "
- - "**** Simulation run 162, type=split(ReduceOverlap)(split_times=[1686859019000000000]). 1 Input Files, 49mb total:"
- - "L0, all files 49mb "
- - "L0.1057[1686858795216216201,1686859375756756740] 1686932677.39s|----------------------------------------L0.1057-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 49mb total:"
- - "L0 "
- - "L0.?[1686858795216216201,1686859019000000000] 1686932677.39s 19mb|--------------L0.?--------------| "
- - "L0.?[1686859019000000001,1686859375756756740] 1686932677.39s 30mb |------------------------L0.?-------------------------| "
- - "**** Simulation run 163, type=split(ReduceOverlap)(split_times=[1686859499000000000, 1686859666027027010]). 1 Input Files, 49mb total:"
- - "L0, all files 49mb "
- - "L0.1058[1686859375756756741,1686859956297297279] 1686932677.39s|----------------------------------------L0.1058-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 49mb total:"
- - "L0 "
- - "L0.?[1686859375756756741,1686859499000000000] 1686932677.39s 10mb|------L0.?-------| "
- - "L0.?[1686859499000000001,1686859666027027010] 1686932677.39s 14mb |---------L0.?----------| "
- - "L0.?[1686859666027027011,1686859956297297279] 1686932677.39s 25mb |-------------------L0.?-------------------| "
- - "**** Simulation run 164, type=split(ReduceOverlap)(split_times=[1686861407648648630]). 1 Input Files, 49mb total:"
- - "L0, all files 49mb "
- - "L0.1073[1686861117378378361,1686861697918918899] 1686932677.39s|----------------------------------------L0.1073-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 49mb total:"
- - "L0 "
- - "L0.?[1686861117378378361,1686861407648648630] 1686932677.39s 25mb|-------------------L0.?--------------------| "
- - "L0.?[1686861407648648631,1686861697918918899] 1686932677.39s 25mb |-------------------L0.?-------------------| "
- - "**** Simulation run 165, type=split(ReduceOverlap)(split_times=[1686863149270270250]). 1 Input Files, 49mb total:"
- - "L0, all files 49mb "
- - "L0.1082[1686862858999999981,1686863439540540519] 1686932677.39s|----------------------------------------L0.1082-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 49mb total:"
- - "L0 "
- - "L0.?[1686862858999999981,1686863149270270250] 1686932677.39s 25mb|-------------------L0.?--------------------| "
- - "L0.?[1686863149270270251,1686863439540540519] 1686932677.39s 25mb |-------------------L0.?-------------------| "
- - "**** Simulation run 166, type=split(ReduceOverlap)(split_times=[1686863699000000000]). 1 Input Files, 48mb total:"
- - "L0, all files 48mb "
- - "L0.995[1686863453232754879,1686864020081081060] 1686932677.39s|-----------------------------------------L0.995-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 48mb total:"
- - "L0 "
- - "L0.?[1686863453232754879,1686863699000000000] 1686932677.39s 21mb|----------------L0.?-----------------| "
- - "L0.?[1686863699000000001,1686864020081081060] 1686932677.39s 27mb |----------------------L0.?----------------------| "
- - "**** Simulation run 167, type=split(ReduceOverlap)(split_times=[1686866632513513490]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1005[1686865761702702681,1686867209822490496] 1686932677.39s|----------------------------------------L0.1005-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686865761702702681,1686866632513513490] 1686932677.39s 60mb|------------------------L0.?------------------------| "
- - "L0.?[1686866632513513491,1686867209822490496] 1686932677.39s 40mb |--------------L0.?---------------| "
- - "**** Simulation run 168, type=split(ReduceOverlap)(split_times=[1686867659000000000, 1686867839000000000, 1686868319000000000]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1014[1686867503324324301,1686868747944483205] 1686933271.57s|----------------------------------------L0.1014-----------------------------------------|"
- - "**** 4 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686867503324324301,1686867659000000000] 1686933271.57s 13mb|--L0.?---| "
- - "L0.?[1686867659000000001,1686867839000000000] 1686933271.57s 14mb |---L0.?----| "
- - "L0.?[1686867839000000001,1686868319000000000] 1686933271.57s 39mb |--------------L0.?--------------| "
- - "L0.?[1686868319000000001,1686868747944483205] 1686933271.57s 34mb |------------L0.?-------------| "
- - "**** Simulation run 169, type=split(ReduceOverlap)(split_times=[1686852699540540530]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.968[1686851828729729721,1686853236700974398] 1686934966.48s|-----------------------------------------L0.968-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686851828729729721,1686852699540540530] 1686934966.48s 62mb|------------------------L0.?-------------------------| "
- - "L0.?[1686852699540540531,1686853236700974398] 1686934966.48s 38mb |--------------L0.?--------------| "
- - "**** Simulation run 170, type=split(ReduceOverlap)(split_times=[1686864890891891870]). 1 Input Files, 94mb total:"
- - "L0, all files 94mb "
- - "L0.1001[1686864020081081061,1686865413378378356] 1686934966.48s|----------------------------------------L0.1001-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 94mb total:"
- - "L0 "
- - "L0.?[1686864020081081061,1686864890891891870] 1686934966.48s 58mb|-------------------------L0.?-------------------------| "
- - "L0.?[1686864890891891871,1686865413378378356] 1686934966.48s 35mb |-------------L0.?--------------| "
- - "**** Simulation run 171, type=split(ReduceOverlap)(split_times=[1686854441162162150, 1686854819000000000]). 1 Input Files, 94mb total:"
- - "L0, all files 94mb "
- - "L0.974[1686853570351351341,1686854963648648636] 1686935546.05s|-----------------------------------------L0.974-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 94mb total:"
- - "L0 "
- - "L0.?[1686853570351351341,1686854441162162150] 1686935546.05s 59mb|-------------------------L0.?-------------------------| "
- - "L0.?[1686854441162162151,1686854819000000000] 1686935546.05s 26mb |---------L0.?---------| "
- - "L0.?[1686854819000000001,1686854963648648636] 1686935546.05s 10mb |-L0.?--| "
- - "**** Simulation run 172, type=split(ReduceOverlap)(split_times=[1686856182783783770]). 1 Input Files, 46mb total:"
- - "L0, all files 46mb "
- - "L0.1037[1686855892513513501,1686856473054054039] 1686935742.51s|----------------------------------------L0.1037-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 46mb total:"
- - "L0, all files 23mb "
- - "L0.?[1686855892513513501,1686856182783783770] 1686935742.51s|-------------------L0.?--------------------| "
- - "L0.?[1686856182783783771,1686856473054054039] 1686935742.51s |-------------------L0.?-------------------| "
- - "**** Simulation run 173, type=split(ReduceOverlap)(split_times=[1686842249810810810]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.938[1686841379000000000,1686842593136179151] 1686935947.46s|-----------------------------------------L0.938-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842249810810810] 1686935947.46s 72mb|-----------------------------L0.?-----------------------------| "
- - "L0.?[1686842249810810811,1686842593136179151] 1686935947.46s 28mb |---------L0.?----------| "
- - "**** Simulation run 174, type=split(ReduceOverlap)(split_times=[1686843991432432430]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.943[1686843120621621621,1686844334757800771] 1686935947.46s|-----------------------------------------L0.943-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686843120621621621,1686843991432432430] 1686935947.46s 72mb|-----------------------------L0.?-----------------------------| "
- - "L0.?[1686843991432432431,1686844334757800771] 1686935947.46s 28mb |---------L0.?----------| "
- - "**** Simulation run 175, type=split(ReduceOverlap)(split_times=[1686845579000000000, 1686845733054054050]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.948[1686844862243243241,1686846076379422391] 1686935947.46s|-----------------------------------------L0.948-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686844862243243241,1686845579000000000] 1686935947.46s 59mb|-----------------------L0.?------------------------| "
- - "L0.?[1686845579000000001,1686845733054054050] 1686935947.46s 13mb |--L0.?---| "
- - "L0.?[1686845733054054051,1686846076379422391] 1686935947.46s 28mb |---------L0.?----------| "
- - "**** Simulation run 176, type=split(ReduceOverlap)(split_times=[1686847474675675670]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.952[1686846603864864861,1686847818001044011] 1686935947.46s|-----------------------------------------L0.952-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686846603864864861,1686847474675675670] 1686935947.46s 72mb|-----------------------------L0.?-----------------------------| "
- - "L0.?[1686847474675675671,1686847818001044011] 1686935947.46s 28mb |---------L0.?----------| "
- - "**** Simulation run 177, type=split(ReduceOverlap)(split_times=[1686849216297297290]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.957[1686848345486486481,1686849568759166090] 1686935947.46s|-----------------------------------------L0.957-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686848345486486481,1686849216297297290] 1686935947.46s 71mb|-----------------------------L0.?-----------------------------| "
- - "L0.?[1686849216297297291,1686849568759166090] 1686935947.46s 29mb |---------L0.?----------| "
- - "**** Simulation run 178, type=split(ReduceOverlap)(split_times=[1686849779000000000]). 1 Input Files, 42mb total:"
- - "L0, all files 42mb "
- - "L0.958[1686849568759166091,1686850087108108100] 1686935947.46s|-----------------------------------------L0.958-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 42mb total:"
- - "L0 "
- - "L0.?[1686849568759166091,1686849779000000000] 1686935947.46s 17mb|---------------L0.?---------------| "
- - "L0.?[1686849779000000001,1686850087108108100] 1686935947.46s 25mb |-----------------------L0.?------------------------| "
- - "**** Simulation run 179, type=split(ReduceOverlap)(split_times=[1686850559000000000, 1686850957918918910]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.962[1686850087108108101,1686851301244287251] 1686935947.46s|-----------------------------------------L0.962-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686850087108108101,1686850559000000000] 1686935947.46s 39mb|--------------L0.?--------------| "
- - "L0.?[1686850559000000001,1686850957918918910] 1686935947.46s 33mb |-----------L0.?------------| "
- - "L0.?[1686850957918918911,1686851301244287251] 1686935947.46s 28mb |---------L0.?----------| "
- - "**** Simulation run 180, type=split(ReduceOverlap)(split_times=[1686857924405405390]). 1 Input Files, 48mb total:"
- - "L0, all files 48mb "
- - "L0.1050[1686857634135135121,1686858214675675659] 1686935947.46s|----------------------------------------L0.1050-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 48mb total:"
- - "L0, all files 24mb "
- - "L0.?[1686857634135135121,1686857924405405390] 1686935947.46s|-------------------L0.?--------------------| "
- - "L0.?[1686857924405405391,1686858214675675659] 1686935947.46s |-------------------L0.?-------------------| "
- - "**** Simulation run 181, type=split(ReduceOverlap)(split_times=[1686859019000000000]). 1 Input Files, 49mb total:"
- - "L0, all files 49mb "
- - "L0.1060[1686858795216216201,1686859375756756740] 1686935947.46s|----------------------------------------L0.1060-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 49mb total:"
- - "L0 "
- - "L0.?[1686858795216216201,1686859019000000000] 1686935947.46s 19mb|--------------L0.?--------------| "
- - "L0.?[1686859019000000001,1686859375756756740] 1686935947.46s 30mb |------------------------L0.?-------------------------| "
- - "**** Simulation run 182, type=split(ReduceOverlap)(split_times=[1686859499000000000, 1686859666027027010]). 1 Input Files, 49mb total:"
- - "L0, all files 49mb "
- - "L0.1061[1686859375756756741,1686859956297297279] 1686935947.46s|----------------------------------------L0.1061-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 49mb total:"
- - "L0 "
- - "L0.?[1686859375756756741,1686859499000000000] 1686935947.46s 10mb|------L0.?-------| "
- - "L0.?[1686859499000000001,1686859666027027010] 1686935947.46s 14mb |---------L0.?----------| "
- - "L0.?[1686859666027027011,1686859956297297279] 1686935947.46s 24mb |-------------------L0.?-------------------| "
- - "**** Simulation run 183, type=split(ReduceOverlap)(split_times=[1686861407648648630]). 1 Input Files, 49mb total:"
- - "L0, all files 49mb "
- - "L0.1076[1686861117378378361,1686861697918918899] 1686935947.46s|----------------------------------------L0.1076-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 49mb total:"
- - "L0, all files 24mb "
- - "L0.?[1686861117378378361,1686861407648648630] 1686935947.46s|-------------------L0.?--------------------| "
- - "L0.?[1686861407648648631,1686861697918918899] 1686935947.46s |-------------------L0.?-------------------| "
- - "**** Simulation run 184, type=split(ReduceOverlap)(split_times=[1686863149270270250]). 1 Input Files, 46mb total:"
- - "L0, all files 46mb "
- - "L0.1085[1686862858999999981,1686863439540540519] 1686935947.46s|----------------------------------------L0.1085-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 46mb total:"
- - "L0, all files 23mb "
- - "L0.?[1686862858999999981,1686863149270270250] 1686935947.46s|-------------------L0.?--------------------| "
- - "L0.?[1686863149270270251,1686863439540540519] 1686935947.46s |-------------------L0.?-------------------| "
- - "**** Simulation run 185, type=split(ReduceOverlap)(split_times=[1686863699000000000]). 1 Input Files, 39mb total:"
- - "L0, all files 39mb "
- - "L0.997[1686863528879205202,1686864020081081060] 1686935947.46s|-----------------------------------------L0.997-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 39mb total:"
- - "L0 "
- - "L0.?[1686863528879205202,1686863699000000000] 1686935947.46s 14mb|------------L0.?-------------| "
- - "L0.?[1686863699000000001,1686864020081081060] 1686935947.46s 26mb |--------------------------L0.?--------------------------| "
- - "**** Simulation run 186, type=split(ReduceOverlap)(split_times=[1686866632513513490]). 1 Input Files, 95mb total:"
- - "L0, all files 95mb "
- - "L0.1011[1686865761702702681,1686867154999999976] 1686935947.46s|----------------------------------------L0.1011-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 95mb total:"
- - "L0 "
- - "L0.?[1686865761702702681,1686866632513513490] 1686935947.46s 59mb|-------------------------L0.?-------------------------| "
- - "L0.?[1686866632513513491,1686867154999999976] 1686935947.46s 36mb |-------------L0.?--------------| "
- - "**** Simulation run 187, type=split(ReduceOverlap)(split_times=[1686867659000000000, 1686867839000000000, 1686868319000000000]). 1 Input Files, 93mb total:"
- - "L0, all files 93mb "
- - "L0.1016[1686867503324324301,1686868896621621596] 1686935947.46s|----------------------------------------L0.1016-----------------------------------------|"
- - "**** 4 Output Files (parquet_file_id not yet assigned), 93mb total:"
- - "L0 "
- - "L0.?[1686867503324324301,1686867659000000000] 1686935947.46s 10mb|--L0.?--| "
- - "L0.?[1686867659000000001,1686867839000000000] 1686935947.46s 12mb |--L0.?---| "
- - "L0.?[1686867839000000001,1686868319000000000] 1686935947.46s 32mb |------------L0.?-------------| "
- - "L0.?[1686868319000000001,1686868896621621596] 1686935947.46s 38mb |---------------L0.?----------------| "
- - "**** Simulation run 188, type=split(ReduceOverlap)(split_times=[1686842249810810810]). 1 Input Files, 6mb total:"
- - "L0, all files 6mb "
- - "L0.940[1686841379000000000,1686843120621621620] 1686936871.55s|-----------------------------------------L0.940-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0, all files 3mb "
- - "L0.?[1686841379000000000,1686842249810810810] 1686936871.55s|-------------------L0.?--------------------| "
- - "L0.?[1686842249810810811,1686843120621621620] 1686936871.55s |-------------------L0.?-------------------| "
- - "**** Simulation run 189, type=split(ReduceOverlap)(split_times=[1686843991432432430]). 1 Input Files, 6mb total:"
- - "L0, all files 6mb "
- - "L0.945[1686843120621621621,1686844862243243240] 1686936871.55s|-----------------------------------------L0.945-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0 "
- - "L0.?[1686843120621621621,1686843991432432430] 1686936871.55s 3mb|-------------------L0.?-------------------| "
- - "L0.?[1686843991432432431,1686844862243243240] 1686936871.55s 3mb |-------------------L0.?-------------------| "
- - "**** Simulation run 190, type=split(ReduceOverlap)(split_times=[1686845579000000000, 1686845733054054050]). 1 Input Files, 6mb total:"
- - "L0, all files 6mb "
- - "L0.964[1686844862243243241,1686846603864864860] 1686936871.55s|-----------------------------------------L0.964-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0 "
- - "L0.?[1686844862243243241,1686845579000000000] 1686936871.55s 2mb|---------------L0.?----------------| "
- - "L0.?[1686845579000000001,1686845733054054050] 1686936871.55s 510kb |L0.?-| "
- - "L0.?[1686845733054054051,1686846603864864860] 1686936871.55s 3mb |-------------------L0.?-------------------| "
- - "**** Simulation run 191, type=split(ReduceOverlap)(split_times=[1686847474675675670]). 1 Input Files, 6mb total:"
- - "L0, all files 6mb "
- - "L0.954[1686846603864864861,1686848345486486480] 1686936871.55s|-----------------------------------------L0.954-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0 "
- - "L0.?[1686846603864864861,1686847474675675670] 1686936871.55s 3mb|-------------------L0.?-------------------| "
- - "L0.?[1686847474675675671,1686848345486486480] 1686936871.55s 3mb |-------------------L0.?-------------------| "
- - "**** Simulation run 192, type=split(ReduceOverlap)(split_times=[1686849216297297290, 1686849779000000000]). 1 Input Files, 11mb total:"
- - "L0, all files 11mb "
- - "L0.959[1686848345486486481,1686850087108108100] 1686936871.55s|-----------------------------------------L0.959-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 11mb total:"
- - "L0 "
- - "L0.?[1686848345486486481,1686849216297297290] 1686936871.55s 6mb|-------------------L0.?-------------------| "
- - "L0.?[1686849216297297291,1686849779000000000] 1686936871.55s 4mb |-----------L0.?------------| "
- - "L0.?[1686849779000000001,1686850087108108100] 1686936871.55s 2mb |----L0.?-----| "
- - "**** Simulation run 193, type=split(ReduceOverlap)(split_times=[1686850559000000000, 1686850957918918910]). 1 Input Files, 6mb total:"
- - "L0, all files 6mb "
- - "L0.965[1686850087108108101,1686851828729729720] 1686936871.55s|-----------------------------------------L0.965-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0 "
- - "L0.?[1686850087108108101,1686850559000000000] 1686936871.55s 2mb|---------L0.?---------| "
- - "L0.?[1686850559000000001,1686850957918918910] 1686936871.55s 1mb |-------L0.?-------| "
- - "L0.?[1686850957918918911,1686851828729729720] 1686936871.55s 3mb |-------------------L0.?-------------------| "
- - "**** Simulation run 194, type=split(ReduceOverlap)(split_times=[1686852699540540530]). 1 Input Files, 55mb total:"
- - "L0, all files 55mb "
- - "L0.970[1686851828729729721,1686853222027027016] 1686936871.55s|-----------------------------------------L0.970-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 55mb total:"
- - "L0 "
- - "L0.?[1686851828729729721,1686852699540540530] 1686936871.55s 34mb|-------------------------L0.?-------------------------| "
- - "L0.?[1686852699540540531,1686853222027027016] 1686936871.55s 21mb |-------------L0.?--------------| "
- - "**** Simulation run 195, type=split(ReduceOverlap)(split_times=[1686854441162162150, 1686854819000000000]). 1 Input Files, 29mb total:"
- - "L0, all files 29mb "
- - "L0.976[1686853570351351341,1686854963648648636] 1686936871.55s|-----------------------------------------L0.976-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 29mb total:"
- - "L0 "
- - "L0.?[1686853570351351341,1686854441162162150] 1686936871.55s 18mb|-------------------------L0.?-------------------------| "
- - "L0.?[1686854441162162151,1686854819000000000] 1686936871.55s 8mb |---------L0.?---------| "
- - "L0.?[1686854819000000001,1686854963648648636] 1686936871.55s 3mb |-L0.?--| "
- - "**** Simulation run 196, type=split(ReduceOverlap)(split_times=[1686856182783783770]). 1 Input Files, 6mb total:"
- - "L0, all files 6mb "
- - "L0.1040[1686855892513513501,1686856473054054039] 1686936871.55s|----------------------------------------L0.1040-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0 "
- - "L0.?[1686855892513513501,1686856182783783770] 1686936871.55s 3mb|-------------------L0.?--------------------| "
- - "L0.?[1686856182783783771,1686856473054054039] 1686936871.55s 3mb |-------------------L0.?-------------------| "
- - "**** Simulation run 197, type=split(ReduceOverlap)(split_times=[1686857924405405390]). 1 Input Files, 4mb total:"
- - "L0, all files 4mb "
- - "L0.1053[1686857634135135121,1686858214675675659] 1686936871.55s|----------------------------------------L0.1053-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 4mb total:"
- - "L0, all files 2mb "
- - "L0.?[1686857634135135121,1686857924405405390] 1686936871.55s|-------------------L0.?--------------------| "
- - "L0.?[1686857924405405391,1686858214675675659] 1686936871.55s |-------------------L0.?-------------------| "
- - "**** Simulation run 198, type=split(ReduceOverlap)(split_times=[1686859019000000000]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1063[1686858795216216201,1686859375756756740] 1686936871.55s|----------------------------------------L0.1063-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[1686858795216216201,1686859019000000000] 1686936871.55s 741kb|--------------L0.?--------------| "
- - "L0.?[1686859019000000001,1686859375756756740] 1686936871.55s 1mb |------------------------L0.?-------------------------| "
- - "**** Simulation run 199, type=split(ReduceOverlap)(split_times=[1686859499000000000, 1686859666027027010]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1064[1686859375756756741,1686859956297297279] 1686936871.55s|----------------------------------------L0.1064-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[1686859375756756741,1686859499000000000] 1686936871.55s 408kb|------L0.?-------| "
- - "L0.?[1686859499000000001,1686859666027027010] 1686936871.55s 553kb |---------L0.?----------| "
- - "L0.?[1686859666027027011,1686859956297297279] 1686936871.55s 962kb |-------------------L0.?-------------------| "
- - "**** Simulation run 200, type=split(ReduceOverlap)(split_times=[1686861407648648630]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1079[1686861117378378361,1686861697918918899] 1686936871.55s|----------------------------------------L0.1079-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0, all files 962kb "
- - "L0.?[1686861117378378361,1686861407648648630] 1686936871.55s|-------------------L0.?--------------------| "
- - "L0.?[1686861407648648631,1686861697918918899] 1686936871.55s |-------------------L0.?-------------------| "
- - "**** Simulation run 201, type=split(ReduceOverlap)(split_times=[1686863149270270250]). 1 Input Files, 4mb total:"
- - "L0, all files 4mb "
- - "L0.1088[1686862858999999981,1686863439540540519] 1686936871.55s|----------------------------------------L0.1088-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 4mb total:"
- - "L0, all files 2mb "
- - "L0.?[1686862858999999981,1686863149270270250] 1686936871.55s|-------------------L0.?--------------------| "
- - "L0.?[1686863149270270251,1686863439540540519] 1686936871.55s |-------------------L0.?-------------------| "
- - "**** Simulation run 202, type=split(ReduceOverlap)(split_times=[1686863699000000000]). 1 Input Files, 4mb total:"
- - "L0, all files 4mb "
- - "L0.1089[1686863439540540520,1686864020081081060] 1686936871.55s|----------------------------------------L0.1089-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 4mb total:"
- - "L0 "
- - "L0.?[1686863439540540520,1686863699000000000] 1686936871.55s 2mb|-----------------L0.?-----------------| "
- - "L0.?[1686863699000000001,1686864020081081060] 1686936871.55s 2mb |---------------------L0.?----------------------| "
- - "**** Simulation run 203, type=split(ReduceOverlap)(split_times=[1686864890891891870]). 1 Input Files, 44mb total:"
- - "L0, all files 44mb "
- - "L0.1003[1686864020081081061,1686865413378378356] 1686936871.55s|----------------------------------------L0.1003-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 44mb total:"
- - "L0 "
- - "L0.?[1686864020081081061,1686864890891891870] 1686936871.55s 27mb|-------------------------L0.?-------------------------| "
- - "L0.?[1686864890891891871,1686865413378378356] 1686936871.55s 16mb |-------------L0.?--------------| "
- - "**** Simulation run 204, type=split(ReduceOverlap)(split_times=[1686866632513513490]). 1 Input Files, 11mb total:"
- - "L0, all files 11mb "
- - "L0.1013[1686865761702702681,1686867503324324300] 1686936871.55s|----------------------------------------L0.1013-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 11mb total:"
- - "L0 "
- - "L0.?[1686865761702702681,1686866632513513490] 1686936871.55s 6mb|-------------------L0.?-------------------| "
- - "L0.?[1686866632513513491,1686867503324324300] 1686936871.55s 6mb |-------------------L0.?-------------------| "
- - "**** Simulation run 205, type=split(ReduceOverlap)(split_times=[1686867659000000000, 1686867839000000000, 1686868319000000000]). 1 Input Files, 17mb total:"
- - "L0, all files 17mb "
- - "L0.1018[1686867503324324301,1686869244945945920] 1686936871.55s|----------------------------------------L0.1018-----------------------------------------|"
- - "**** 4 Output Files (parquet_file_id not yet assigned), 17mb total:"
- - "L0 "
- - "L0.?[1686867503324324301,1686867659000000000] 1686936871.55s 1mb|-L0.?-| "
- - "L0.?[1686867659000000001,1686867839000000000] 1686936871.55s 2mb |-L0.?--| "
- - "L0.?[1686867839000000001,1686868319000000000] 1686936871.55s 5mb |---------L0.?---------| "
- - "L0.?[1686868319000000001,1686869244945945920] 1686936871.55s 9mb |--------------------L0.?---------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 56 files: L0.936, L0.938, L0.940, L0.941, L0.943, L0.945, L0.946, L0.948, L0.950, L0.952, L0.954, L0.955, L0.956, L0.957, L0.958, L0.959, L0.960, L0.962, L0.964, L0.965, L0.966, L0.968, L0.970, L0.972, L0.974, L0.976, L0.995, L0.997, L0.999, L0.1001, L0.1003, L0.1005, L0.1011, L0.1013, L0.1014, L0.1016, L0.1018, L0.1034, L0.1037, L0.1040, L0.1047, L0.1050, L0.1053, L0.1057, L0.1058, L0.1060, L0.1061, L0.1063, L0.1064, L0.1073, L0.1076, L0.1079, L0.1082, L0.1085, L0.1088, L0.1089"
- - " Creating 131 files"
- - "**** Simulation run 206, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686858240168622590, 1686864651607515459]). 36 Input Files, 217mb total:"
- - "L0 "
- - "L0.1096[1686851828729729721,1686852699540540530] 1686931600.58s 51mb|L0.1096| "
- - "L0.1097[1686852699540540531,1686853222027027016] 1686931600.58s 31mb |L0.1097| "
- - "L0.967[1686853222027027017,1686853570351351340] 1686931600.58s 20mb |L0.967| "
- - "L0.1098[1686864020081081061,1686864890891891870] 1686931893.7s 49mb |L0.1098| "
- - "L0.1099[1686864890891891871,1686865413378378356] 1686931893.7s 29mb |L0.1099|"
- - "L0.1000[1686865413378378357,1686865761702702680] 1686931893.7s 20mb |L0.1000|"
- - "L1 "
- - "L1.434[1686851828729729721,1686852699540540530] 1686928854.57s 1mb|L1.434| "
- - "L1.435[1686852699540540531,1686853570351351340] 1686928854.57s 1mb |L1.435| "
- - "L1.436[1686853570351351341,1686854441162162150] 1686928854.57s 1mb |L1.436| "
- - "L1.437[1686854441162162151,1686854819000000000] 1686928854.57s 490kb |L1.437| "
- - "L1.655[1686854879000000000,1686855311972972960] 1686928854.57s 556kb |L1.655| "
- - "L1.1031[1686855311972972961,1686855892513513500] 1686928854.57s 746kb |L1.1031| "
- - "L1.1032[1686855892513513501,1686856182783783770] 1686928854.57s 373kb |L1.1032| "
- - "L1.1042[1686856182783783771,1686856473054054039] 1686928854.57s 373kb |L1.1042| "
- - "L1.1043[1686856473054054040,1686857053594594580] 1686928854.57s 746kb |L1.1043| "
- - "L1.1044[1686857053594594581,1686857634135135120] 1686928854.57s 746kb |L1.1044| "
- - "L1.1045[1686857634135135121,1686857924405405390] 1686928854.57s 373kb |L1.1045| "
- - "L1.1055[1686857924405405391,1686858214675675659] 1686928854.57s 373kb |L1.1055| "
- - "L1.1056[1686858214675675660,1686858795216216200] 1686928854.57s 746kb |L1.1056| "
- - "L1.660[1686858795216216201,1686859019000000000] 1686928854.57s 287kb |L1.660| "
- - "L1.1066[1686859079000000000,1686859375756756740] 1686928854.57s 409kb |L1.1066| "
- - "L1.1067[1686859375756756741,1686859499000000000] 1686928854.57s 170kb |L1.1067| "
- - "L1.739[1686859559000000000,1686859666027027010] 1686928854.57s 137kb |L1.739| "
- - "L1.1068[1686859666027027011,1686859956297297279] 1686928854.57s 371kb |L1.1068| "
- - "L1.1069[1686859956297297280,1686860536837837820] 1686928854.57s 743kb |L1.1069| "
- - "L1.1070[1686860536837837821,1686861117378378360] 1686928854.57s 743kb |L1.1070| "
- - "L1.1071[1686861117378378361,1686861407648648630] 1686928854.57s 371kb |L1.1071| "
- - "L1.1092[1686861407648648631,1686861697918918899] 1686928854.57s 371kb |L1.1092| "
- - "L1.1093[1686861697918918900,1686862278459459440] 1686928854.57s 743kb |L1.1093| "
- - "L1.1094[1686862278459459441,1686862858999999980] 1686928854.57s 743kb |L1.1094| "
- - "L1.1095[1686862858999999981,1686863149270270250] 1686928854.57s 371kb |L1.1095| "
- - "L1.1090[1686863149270270251,1686863439540540519] 1686928854.57s 371kb |L1.1090| "
- - "L1.1091[1686863439540540520,1686863699000000000] 1686928854.57s 332kb |L1.1091| "
- - "L1.758[1686863759000000000,1686864020081081060] 1686928854.57s 329kb |L1.758| "
- - "L1.759[1686864020081081061,1686864890891891870] 1686928854.57s 1mb |L1.759| "
- - "L1.760[1686864890891891871,1686865761702702680] 1686928854.57s 1mb |L1.760|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 217mb total:"
- - "L1 "
- - "L1.?[1686851828729729721,1686858240168622590] 1686931893.7s 100mb|-----------------L1.?------------------| "
- - "L1.?[1686858240168622591,1686864651607515459] 1686931893.7s 100mb |-----------------L1.?------------------| "
- - "L1.?[1686864651607515460,1686865761702702680] 1686931893.7s 17mb |L1.?-| "
- - "Committing partition 1:"
- - " Soft Deleting 36 files: L1.434, L1.435, L1.436, L1.437, L1.655, L1.660, L1.739, L1.758, L1.759, L1.760, L0.967, L0.1000, L1.1031, L1.1032, L1.1042, L1.1043, L1.1044, L1.1045, L1.1055, L1.1056, L1.1066, L1.1067, L1.1068, L1.1069, L1.1070, L1.1071, L1.1090, L1.1091, L1.1092, L1.1093, L1.1094, L1.1095, L0.1096, L0.1097, L0.1098, L0.1099"
- - " Creating 3 files"
- - "**** Simulation run 207, type=split(HighL0OverlapTotalBacklog)(split_times=[1686852757594594584, 1686853686459459447, 1686854615324324310, 1686855544189189173, 1686856473054054036, 1686857401918918899]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1227[1686851828729729721,1686858240168622590] 1686931893.7s|----------------------------------------L1.1227-----------------------------------------|"
- - "**** 7 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686851828729729721,1686852757594594584] 1686931893.7s 14mb|---L1.?----| "
- - "L1.?[1686852757594594585,1686853686459459447] 1686931893.7s 14mb |---L1.?----| "
- - "L1.?[1686853686459459448,1686854615324324310] 1686931893.7s 14mb |---L1.?----| "
- - "L1.?[1686854615324324311,1686855544189189173] 1686931893.7s 14mb |---L1.?----| "
- - "L1.?[1686855544189189174,1686856473054054036] 1686931893.7s 14mb |---L1.?----| "
- - "L1.?[1686856473054054037,1686857401918918899] 1686931893.7s 14mb |---L1.?----| "
- - "L1.?[1686857401918918900,1686858240168622590] 1686931893.7s 13mb |--L1.?---| "
- - "**** Simulation run 208, type=split(HighL0OverlapTotalBacklog)(split_times=[1686852757594594584]). 1 Input Files, 38mb total:"
- - "L0, all files 38mb "
- - "L0.1141[1686852699540540531,1686853236700974398] 1686934966.48s|----------------------------------------L0.1141-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 38mb total:"
- - "L0 "
- - "L0.?[1686852699540540531,1686852757594594584] 1686934966.48s 4mb|-L0.?--| "
- - "L0.?[1686852757594594585,1686853236700974398] 1686934966.48s 34mb |-------------------------------------L0.?-------------------------------------| "
- - "**** Simulation run 209, type=split(HighL0OverlapTotalBacklog)(split_times=[1686852757594594584]). 1 Input Files, 21mb total:"
- - "L0, all files 21mb "
- - "L0.1200[1686852699540540531,1686853222027027016] 1686936871.55s|----------------------------------------L0.1200-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 21mb total:"
- - "L0 "
- - "L0.?[1686852699540540531,1686852757594594584] 1686936871.55s 2mb|-L0.?--| "
- - "L0.?[1686852757594594585,1686853222027027016] 1686936871.55s 18mb |------------------------------------L0.?-------------------------------------| "
- - "**** Simulation run 210, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853686459459447]). 1 Input Files, 69mb total:"
- - "L0, all files 69mb "
- - "L0.1116[1686853570351351341,1686854441162162150] 1686932677.39s|----------------------------------------L0.1116-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 69mb total:"
- - "L0 "
- - "L0.?[1686853570351351341,1686853686459459447] 1686932677.39s 9mb|--L0.?---| "
- - "L0.?[1686853686459459448,1686854441162162150] 1686932677.39s 60mb |------------------------------------L0.?------------------------------------| "
- - "**** Simulation run 211, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853686459459447]). 1 Input Files, 59mb total:"
- - "L0, all files 59mb "
- - "L0.1144[1686853570351351341,1686854441162162150] 1686935546.05s|----------------------------------------L0.1144-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 59mb total:"
- - "L0 "
- - "L0.?[1686853570351351341,1686853686459459447] 1686935546.05s 8mb|--L0.?---| "
- - "L0.?[1686853686459459448,1686854441162162150] 1686935546.05s 51mb |------------------------------------L0.?------------------------------------| "
- - "**** Simulation run 212, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853686459459447]). 1 Input Files, 18mb total:"
- - "L0, all files 18mb "
- - "L0.1201[1686853570351351341,1686854441162162150] 1686936871.55s|----------------------------------------L0.1201-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 18mb total:"
- - "L0 "
- - "L0.?[1686853570351351341,1686853686459459447] 1686936871.55s 2mb|--L0.?---| "
- - "L0.?[1686853686459459448,1686854441162162150] 1686936871.55s 16mb |------------------------------------L0.?------------------------------------| "
- - "**** Simulation run 213, type=split(HighL0OverlapTotalBacklog)(split_times=[1686854615324324310]). 1 Input Files, 30mb total:"
- - "L0, all files 30mb "
- - "L0.1117[1686854441162162151,1686854819000000000] 1686932677.39s|----------------------------------------L0.1117-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:"
- - "L0 "
- - "L0.?[1686854441162162151,1686854615324324310] 1686932677.39s 14mb|-----------------L0.?------------------| "
- - "L0.?[1686854615324324311,1686854819000000000] 1686932677.39s 16mb |---------------------L0.?---------------------| "
- - "**** Simulation run 214, type=split(HighL0OverlapTotalBacklog)(split_times=[1686854615324324310]). 1 Input Files, 26mb total:"
- - "L0, all files 26mb "
- - "L0.1145[1686854441162162151,1686854819000000000] 1686935546.05s|----------------------------------------L0.1145-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 26mb total:"
- - "L0 "
- - "L0.?[1686854441162162151,1686854615324324310] 1686935546.05s 12mb|-----------------L0.?------------------| "
- - "L0.?[1686854615324324311,1686854819000000000] 1686935546.05s 14mb |---------------------L0.?---------------------| "
- - "**** Simulation run 215, type=split(HighL0OverlapTotalBacklog)(split_times=[1686854615324324310]). 1 Input Files, 8mb total:"
- - "L0, all files 8mb "
- - "L0.1202[1686854441162162151,1686854819000000000] 1686936871.55s|----------------------------------------L0.1202-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 8mb total:"
- - "L0 "
- - "L0.?[1686854441162162151,1686854615324324310] 1686936871.55s 4mb|-----------------L0.?------------------| "
- - "L0.?[1686854615324324311,1686854819000000000] 1686936871.55s 4mb |---------------------L0.?---------------------| "
- - "**** Simulation run 216, type=split(HighL0OverlapTotalBacklog)(split_times=[1686855544189189173]). 1 Input Files, 47mb total:"
- - "L0, all files 47mb "
- - "L0.1033[1686855311972972961,1686855892513513500] 1686932677.39s|----------------------------------------L0.1033-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 47mb total:"
- - "L0 "
- - "L0.?[1686855311972972961,1686855544189189173] 1686932677.39s 19mb|--------------L0.?---------------| "
- - "L0.?[1686855544189189174,1686855892513513500] 1686932677.39s 28mb |------------------------L0.?------------------------| "
- - "**** Simulation run 217, type=split(HighL0OverlapTotalBacklog)(split_times=[1686855544189189173]). 1 Input Files, 46mb total:"
- - "L0, all files 46mb "
- - "L0.1036[1686855311972972961,1686855892513513500] 1686935742.51s|----------------------------------------L0.1036-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 46mb total:"
- - "L0 "
- - "L0.?[1686855311972972961,1686855544189189173] 1686935742.51s 19mb|--------------L0.?---------------| "
- - "L0.?[1686855544189189174,1686855892513513500] 1686935742.51s 28mb |------------------------L0.?------------------------| "
- - "**** Simulation run 218, type=split(HighL0OverlapTotalBacklog)(split_times=[1686855544189189173]). 1 Input Files, 6mb total:"
- - "L0, all files 6mb "
- - "L0.1039[1686855311972972961,1686855892513513500] 1686936871.55s|----------------------------------------L0.1039-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0 "
- - "L0.?[1686855311972972961,1686855544189189173] 1686936871.55s 2mb|--------------L0.?---------------| "
- - "L0.?[1686855544189189174,1686855892513513500] 1686936871.55s 3mb |------------------------L0.?------------------------| "
- - "**** Simulation run 219, type=split(HighL0OverlapTotalBacklog)(split_times=[1686856473054054036]). 1 Input Files, 24mb total:"
- - "L0, all files 24mb "
- - "L0.1120[1686856182783783771,1686856473054054039] 1686932677.39s|----------------------------------------L0.1120-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 24mb total:"
- - "L0 "
- - "L0.?[1686856182783783771,1686856473054054036] 1686932677.39s 24mb|-----------------------------------------L0.?------------------------------------------| "
- - "L0.?[1686856473054054037,1686856473054054039] 1686932677.39s 1b |L0.?|"
- - "**** Simulation run 220, type=split(HighL0OverlapTotalBacklog)(split_times=[1686856473054054036]). 1 Input Files, 23mb total:"
- - "L0, all files 23mb "
- - "L0.1148[1686856182783783771,1686856473054054039] 1686935742.51s|----------------------------------------L0.1148-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 23mb total:"
- - "L0 "
- - "L0.?[1686856182783783771,1686856473054054036] 1686935742.51s 23mb|-----------------------------------------L0.?------------------------------------------| "
- - "L0.?[1686856473054054037,1686856473054054039] 1686935742.51s 1b |L0.?|"
- - "**** Simulation run 221, type=split(HighL0OverlapTotalBacklog)(split_times=[1686856473054054036]). 1 Input Files, 3mb total:"
- - "L0, all files 3mb "
- - "L0.1205[1686856182783783771,1686856473054054039] 1686936871.55s|----------------------------------------L0.1205-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 3mb total:"
- - "L0 "
- - "L0.?[1686856182783783771,1686856473054054036] 1686936871.55s 3mb|-----------------------------------------L0.?------------------------------------------| "
- - "L0.?[1686856473054054037,1686856473054054039] 1686936871.55s 1b |L0.?|"
- - "**** Simulation run 222, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857401918918899]). 1 Input Files, 47mb total:"
- - "L0, all files 47mb "
- - "L0.1046[1686857053594594581,1686857634135135120] 1686932677.39s|----------------------------------------L0.1046-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 47mb total:"
- - "L0 "
- - "L0.?[1686857053594594581,1686857401918918899] 1686932677.39s 28mb|-----------------------L0.?------------------------| "
- - "L0.?[1686857401918918900,1686857634135135120] 1686932677.39s 19mb |---------------L0.?---------------| "
- - "**** Simulation run 223, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857401918918899]). 1 Input Files, 48mb total:"
- - "L0, all files 48mb "
- - "L0.1049[1686857053594594581,1686857634135135120] 1686935947.46s|----------------------------------------L0.1049-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 48mb total:"
- - "L0 "
- - "L0.?[1686857053594594581,1686857401918918899] 1686935947.46s 29mb|-----------------------L0.?------------------------| "
- - "L0.?[1686857401918918900,1686857634135135120] 1686935947.46s 19mb |---------------L0.?---------------| "
- - "**** Simulation run 224, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857401918918899]). 1 Input Files, 4mb total:"
- - "L0, all files 4mb "
- - "L0.1052[1686857053594594581,1686857634135135120] 1686936871.55s|----------------------------------------L0.1052-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 4mb total:"
- - "L0 "
- - "L0.?[1686857053594594581,1686857401918918899] 1686936871.55s 2mb|-----------------------L0.?------------------------| "
- - "L0.?[1686857401918918900,1686857634135135120] 1686936871.55s 2mb |---------------L0.?---------------| "
- - "**** Simulation run 225, type=split(HighL0OverlapTotalBacklog)(split_times=[1686858330783783762]). 1 Input Files, 4mb total:"
- - "L0, all files 4mb "
- - "L0.1054[1686858214675675660,1686858795216216200] 1686936871.55s|----------------------------------------L0.1054-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 4mb total:"
- - "L0 "
- - "L0.?[1686858214675675660,1686858330783783762] 1686936871.55s 782kb|-----L0.?------| "
- - "L0.?[1686858330783783763,1686858795216216200] 1686936871.55s 3mb |---------------------------------L0.?---------------------------------| "
- - "**** Simulation run 226, type=split(HighL0OverlapTotalBacklog)(split_times=[1686858330783783762, 1686859259648648625, 1686860188513513488, 1686861117378378351, 1686862046243243214, 1686862975108108077, 1686863903972972940]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1228[1686858240168622591,1686864651607515459] 1686931893.7s|----------------------------------------L1.1228-----------------------------------------|"
- - "**** 8 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686858240168622591,1686858330783783762] 1686931893.7s 1mb|L1.?| "
- - "L1.?[1686858330783783763,1686859259648648625] 1686931893.7s 14mb |---L1.?----| "
- - "L1.?[1686859259648648626,1686860188513513488] 1686931893.7s 14mb |---L1.?----| "
- - "L1.?[1686860188513513489,1686861117378378351] 1686931893.7s 14mb |---L1.?----| "
- - "L1.?[1686861117378378352,1686862046243243214] 1686931893.7s 14mb |---L1.?----| "
- - "L1.?[1686862046243243215,1686862975108108077] 1686931893.7s 14mb |---L1.?----| "
- - "L1.?[1686862975108108078,1686863903972972940] 1686931893.7s 14mb |---L1.?----| "
- - "L1.?[1686863903972972941,1686864651607515459] 1686931893.7s 12mb |--L1.?--| "
- - "**** Simulation run 227, type=split(HighL0OverlapTotalBacklog)(split_times=[1686858330783783762]). 1 Input Files, 45mb total:"
- - "L0, all files 45mb "
- - "L0.986[1686858251288003856,1686858795216216200] 1686935947.46s|-----------------------------------------L0.986-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 45mb total:"
- - "L0 "
- - "L0.?[1686858251288003856,1686858330783783762] 1686935947.46s 7mb|---L0.?----| "
- - "L0.?[1686858330783783763,1686858795216216200] 1686935947.46s 39mb |-----------------------------------L0.?-----------------------------------| "
- - "**** Simulation run 228, type=split(HighL0OverlapTotalBacklog)(split_times=[1686858330783783762]). 1 Input Files, 42mb total:"
- - "L0, all files 42mb "
- - "L0.984[1686858278525917086,1686858795216216200] 1686932677.39s|-----------------------------------------L0.984-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 42mb total:"
- - "L0 "
- - "L0.?[1686858278525917086,1686858330783783762] 1686932677.39s 4mb|-L0.?--| "
- - "L0.?[1686858330783783763,1686858795216216200] 1686932677.39s 38mb |-------------------------------------L0.?-------------------------------------| "
- - "**** Simulation run 229, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859259648648625]). 1 Input Files, 30mb total:"
- - "L0, all files 30mb "
- - "L0.1124[1686859019000000001,1686859375756756740] 1686932677.39s|----------------------------------------L0.1124-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:"
- - "L0 "
- - "L0.?[1686859019000000001,1686859259648648625] 1686932677.39s 20mb|---------------------------L0.?---------------------------| "
- - "L0.?[1686859259648648626,1686859375756756740] 1686932677.39s 10mb |-----------L0.?------------| "
- - "**** Simulation run 230, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859259648648625]). 1 Input Files, 30mb total:"
- - "L0, all files 30mb "
- - "L0.1168[1686859019000000001,1686859375756756740] 1686935947.46s|----------------------------------------L0.1168-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:"
- - "L0 "
- - "L0.?[1686859019000000001,1686859259648648625] 1686935947.46s 20mb|---------------------------L0.?---------------------------| "
- - "L0.?[1686859259648648626,1686859375756756740] 1686935947.46s 10mb |-----------L0.?------------| "
- - "**** Simulation run 231, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859259648648625]). 1 Input Files, 1mb total:"
- - "L0, all files 1mb "
- - "L0.1209[1686859019000000001,1686859375756756740] 1686936871.55s|----------------------------------------L0.1209-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L0 "
- - "L0.?[1686859019000000001,1686859259648648625] 1686936871.55s 797kb|---------------------------L0.?---------------------------| "
- - "L0.?[1686859259648648626,1686859375756756740] 1686936871.55s 385kb |-----------L0.?------------| "
- - "**** Simulation run 232, type=split(HighL0OverlapTotalBacklog)(split_times=[1686860188513513488]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1065[1686859956297297280,1686860536837837820] 1686936871.55s|----------------------------------------L0.1065-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[1686859956297297280,1686860188513513488] 1686936871.55s 769kb|--------------L0.?---------------| "
- - "L0.?[1686860188513513489,1686860536837837820] 1686936871.55s 1mb |------------------------L0.?------------------------| "
- - "**** Simulation run 233, type=split(HighL0OverlapTotalBacklog)(split_times=[1686860188513513488]). 1 Input Files, 48mb total:"
- - "L0, all files 48mb "
- - "L0.989[1686859969989511639,1686860536837837820] 1686932677.39s|-----------------------------------------L0.989-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 48mb total:"
- - "L0 "
- - "L0.?[1686859969989511639,1686860188513513488] 1686932677.39s 19mb|--------------L0.?--------------| "
- - "L0.?[1686860188513513489,1686860536837837820] 1686932677.39s 30mb |------------------------L0.?-------------------------| "
- - "**** Simulation run 234, type=split(HighL0OverlapTotalBacklog)(split_times=[1686860188513513488]). 1 Input Files, 46mb total:"
- - "L0, all files 46mb "
- - "L0.991[1686859988431489231,1686860536837837820] 1686935947.46s|-----------------------------------------L0.991-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 46mb total:"
- - "L0 "
- - "L0.?[1686859988431489231,1686860188513513488] 1686935947.46s 17mb|-------------L0.?-------------| "
- - "L0.?[1686860188513513489,1686860536837837820] 1686935947.46s 29mb |-------------------------L0.?--------------------------| "
- - "**** Simulation run 235, type=split(HighL0OverlapTotalBacklog)(split_times=[1686861117378378351]). 1 Input Files, 49mb total:"
- - "L0, all files 49mb "
- - "L0.1072[1686860536837837821,1686861117378378360] 1686932677.39s|----------------------------------------L0.1072-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 49mb total:"
- - "L0 "
- - "L0.?[1686860536837837821,1686861117378378351] 1686932677.39s 49mb|-----------------------------------------L0.?------------------------------------------| "
- - "L0.?[1686861117378378352,1686861117378378360] 1686932677.39s 1b |L0.?|"
- - "**** Simulation run 236, type=split(HighL0OverlapTotalBacklog)(split_times=[1686861117378378351]). 1 Input Files, 49mb total:"
- - "L0, all files 49mb "
- - "L0.1075[1686860536837837821,1686861117378378360] 1686935947.46s|----------------------------------------L0.1075-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 49mb total:"
- - "L0 "
- - "L0.?[1686860536837837821,1686861117378378351] 1686935947.46s 49mb|-----------------------------------------L0.?------------------------------------------| "
- - "L0.?[1686861117378378352,1686861117378378360] 1686935947.46s 1b |L0.?|"
- - "**** Simulation run 237, type=split(HighL0OverlapTotalBacklog)(split_times=[1686861117378378351]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1078[1686860536837837821,1686861117378378360] 1686936871.55s|----------------------------------------L0.1078-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[1686860536837837821,1686861117378378351] 1686936871.55s 2mb|-----------------------------------------L0.?------------------------------------------| "
- - "L0.?[1686861117378378352,1686861117378378360] 1686936871.55s 1b |L0.?|"
- - "**** Simulation run 238, type=split(HighL0OverlapTotalBacklog)(split_times=[1686862046243243214]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1080[1686861697918918900,1686862278459459440] 1686936871.55s|----------------------------------------L0.1080-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[1686861697918918900,1686862046243243214] 1686936871.55s 1mb|-----------------------L0.?------------------------| "
- - "L0.?[1686862046243243215,1686862278459459440] 1686936871.55s 769kb |---------------L0.?---------------| "
- - "**** Simulation run 239, type=split(HighL0OverlapTotalBacklog)(split_times=[1686862046243243214]). 1 Input Files, 48mb total:"
- - "L0, all files 48mb "
- - "L0.1008[1686861711611133259,1686862278459459440] 1686932677.39s|----------------------------------------L0.1008-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 48mb total:"
- - "L0 "
- - "L0.?[1686861711611133259,1686862046243243214] 1686932677.39s 28mb|-----------------------L0.?------------------------| "
- - "L0.?[1686862046243243215,1686862278459459440] 1686932677.39s 20mb |---------------L0.?---------------| "
- - "**** Simulation run 240, type=split(HighL0OverlapTotalBacklog)(split_times=[1686862046243243214]). 1 Input Files, 46mb total:"
- - "L0, all files 46mb "
- - "L0.1010[1686861730053110851,1686862278459459440] 1686935947.46s|----------------------------------------L0.1010-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 46mb total:"
- - "L0 "
- - "L0.?[1686861730053110851,1686862046243243214] 1686935947.46s 26mb|----------------------L0.?-----------------------| "
- - "L0.?[1686862046243243215,1686862278459459440] 1686935947.46s 19mb |----------------L0.?----------------| "
- - "**** Simulation run 241, type=split(HighL0OverlapTotalBacklog)(split_times=[1686862975108108077]). 1 Input Files, 25mb total:"
- - "L0, all files 25mb "
- - "L0.1130[1686862858999999981,1686863149270270250] 1686932677.39s|----------------------------------------L0.1130-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 25mb total:"
- - "L0 "
- - "L0.?[1686862858999999981,1686862975108108077] 1686932677.39s 10mb|--------------L0.?---------------| "
- - "L0.?[1686862975108108078,1686863149270270250] 1686932677.39s 15mb |------------------------L0.?------------------------| "
- - "**** Simulation run 242, type=split(HighL0OverlapTotalBacklog)(split_times=[1686862975108108077]). 1 Input Files, 23mb total:"
- - "L0, all files 23mb "
- - "L0.1174[1686862858999999981,1686863149270270250] 1686935947.46s|----------------------------------------L0.1174-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 23mb total:"
- - "L0 "
- - "L0.?[1686862858999999981,1686862975108108077] 1686935947.46s 9mb|--------------L0.?---------------| "
- - "L0.?[1686862975108108078,1686863149270270250] 1686935947.46s 14mb |------------------------L0.?------------------------| "
- - "**** Simulation run 243, type=split(HighL0OverlapTotalBacklog)(split_times=[1686862975108108077]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1215[1686862858999999981,1686863149270270250] 1686936871.55s|----------------------------------------L0.1215-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[1686862858999999981,1686862975108108077] 1686936871.55s 782kb|--------------L0.?---------------| "
- - "L0.?[1686862975108108078,1686863149270270250] 1686936871.55s 1mb |------------------------L0.?------------------------| "
- - "**** Simulation run 244, type=split(HighL0OverlapTotalBacklog)(split_times=[1686863903972972940]). 1 Input Files, 27mb total:"
- - "L0, all files 27mb "
- - "L0.1133[1686863699000000001,1686864020081081060] 1686932677.39s|----------------------------------------L0.1133-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 27mb total:"
- - "L0 "
- - "L0.?[1686863699000000001,1686863903972972940] 1686932677.39s 17mb|-------------------------L0.?--------------------------| "
- - "L0.?[1686863903972972941,1686864020081081060] 1686932677.39s 10mb |-------------L0.?-------------| "
- - "**** Simulation run 245, type=split(HighL0OverlapTotalBacklog)(split_times=[1686863903972972940]). 1 Input Files, 26mb total:"
- - "L0, all files 26mb "
- - "L0.1177[1686863699000000001,1686864020081081060] 1686935947.46s|----------------------------------------L0.1177-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 26mb total:"
- - "L0 "
- - "L0.?[1686863699000000001,1686863903972972940] 1686935947.46s 16mb|-------------------------L0.?--------------------------| "
- - "L0.?[1686863903972972941,1686864020081081060] 1686935947.46s 9mb |-------------L0.?-------------| "
- - "**** Simulation run 246, type=split(HighL0OverlapTotalBacklog)(split_times=[1686863903972972940]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1218[1686863699000000001,1686864020081081060] 1686936871.55s|----------------------------------------L0.1218-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[1686863699000000001,1686863903972972940] 1686936871.55s 1mb|-------------------------L0.?--------------------------| "
- - "L0.?[1686863903972972941,1686864020081081060] 1686936871.55s 782kb |-------------L0.?-------------| "
- - "**** Simulation run 247, type=split(HighL0OverlapTotalBacklog)(split_times=[1686864832837837803]). 1 Input Files, 58mb total:"
- - "L0, all files 58mb "
- - "L0.1142[1686864020081081061,1686864890891891870] 1686934966.48s|----------------------------------------L0.1142-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 58mb total:"
- - "L0 "
- - "L0.?[1686864020081081061,1686864832837837803] 1686934966.48s 55mb|--------------------------------------L0.?---------------------------------------| "
- - "L0.?[1686864832837837804,1686864890891891870] 1686934966.48s 4mb |L0.?| "
- - "**** Simulation run 248, type=split(HighL0OverlapTotalBacklog)(split_times=[1686864832837837803]). 1 Input Files, 27mb total:"
- - "L0, all files 27mb "
- - "L0.1219[1686864020081081061,1686864890891891870] 1686936871.55s|----------------------------------------L0.1219-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 27mb total:"
- - "L0 "
- - "L0.?[1686864020081081061,1686864832837837803] 1686936871.55s 26mb|--------------------------------------L0.?---------------------------------------| "
- - "L0.?[1686864832837837804,1686864890891891870] 1686936871.55s 2mb |L0.?| "
- - "**** Simulation run 249, type=split(HighL0OverlapTotalBacklog)(split_times=[1686864832837837803]). 1 Input Files, 17mb total:"
- - "L1, all files 17mb "
- - "L1.1229[1686864651607515460,1686865761702702680] 1686931893.7s|----------------------------------------L1.1229-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 17mb total:"
- - "L1 "
- - "L1.?[1686864651607515460,1686864832837837803] 1686931893.7s 3mb|----L1.?----| "
- - "L1.?[1686864832837837804,1686865761702702680] 1686931893.7s 14mb |----------------------------------L1.?-----------------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 43 files: L0.984, L0.986, L0.989, L0.991, L0.1008, L0.1010, L0.1033, L0.1036, L0.1039, L0.1046, L0.1049, L0.1052, L0.1054, L0.1065, L0.1072, L0.1075, L0.1078, L0.1080, L0.1116, L0.1117, L0.1120, L0.1124, L0.1130, L0.1133, L0.1141, L0.1142, L0.1144, L0.1145, L0.1148, L0.1168, L0.1174, L0.1177, L0.1200, L0.1201, L0.1202, L0.1205, L0.1209, L0.1215, L0.1218, L0.1219, L1.1227, L1.1228, L1.1229"
- - " Creating 97 files"
- - "**** Simulation run 250, type=split(ReduceOverlap)(split_times=[1686858240168622590]). 1 Input Files, 5mb total:"
- - "L0, all files 5mb "
- - "L0.1048[1686858214675675660,1686858278525917085] 1686932677.39s|----------------------------------------L0.1048-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 5mb total:"
- - "L0 "
- - "L0.?[1686858214675675660,1686858240168622590] 1686932677.39s 2mb|--------------L0.?---------------| "
- - "L0.?[1686858240168622591,1686858278525917085] 1686932677.39s 3mb |------------------------L0.?------------------------| "
- - "**** Simulation run 251, type=split(ReduceOverlap)(split_times=[1686864651607515459]). 1 Input Files, 55mb total:"
- - "L0, all files 55mb "
- - "L0.1321[1686864020081081061,1686864832837837803] 1686934966.48s|----------------------------------------L0.1321-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 55mb total:"
- - "L0 "
- - "L0.?[1686864020081081061,1686864651607515459] 1686934966.48s 42mb|-------------------------------L0.?--------------------------------| "
- - "L0.?[1686864651607515460,1686864832837837803] 1686934966.48s 12mb |-------L0.?-------| "
- - "**** Simulation run 252, type=split(ReduceOverlap)(split_times=[1686864651607515459]). 1 Input Files, 26mb total:"
- - "L0, all files 26mb "
- - "L0.1323[1686864020081081061,1686864832837837803] 1686936871.55s|----------------------------------------L0.1323-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 26mb total:"
- - "L0 "
- - "L0.?[1686864020081081061,1686864651607515459] 1686936871.55s 20mb|-------------------------------L0.?--------------------------------| "
- - "L0.?[1686864651607515460,1686864832837837803] 1686936871.55s 6mb |-------L0.?-------| "
- - "**** Simulation run 253, type=split(ReduceOverlap)(split_times=[1686858240168622590]). 1 Input Files, 3mb total:"
- - "L0, all files 3mb "
- - "L0.1051[1686858214675675660,1686858251288003855] 1686935947.46s|----------------------------------------L0.1051-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 3mb total:"
- - "L0 "
- - "L0.?[1686858214675675660,1686858240168622590] 1686935947.46s 2mb|----------------------------L0.?----------------------------| "
- - "L0.?[1686858240168622591,1686858251288003855] 1686935947.46s 951kb |----------L0.?-----------| "
- - "**** Simulation run 254, type=split(ReduceOverlap)(split_times=[1686858240168622590]). 1 Input Files, 782kb total:"
- - "L0, all files 782kb "
- - "L0.1271[1686858214675675660,1686858330783783762] 1686936871.55s|----------------------------------------L0.1271-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 782kb total:"
- - "L0 "
- - "L0.?[1686858214675675660,1686858240168622590] 1686936871.55s 172kb|------L0.?-------| "
- - "L0.?[1686858240168622591,1686858330783783762] 1686936871.55s 611kb |--------------------------------L0.?--------------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 5 files: L0.1048, L0.1051, L0.1271, L0.1321, L0.1323"
- - " Creating 10 files"
- - "**** Simulation run 255, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686842571022320444, 1686843763044640888]). 10 Input Files, 292mb total:"
- - "L0 "
- - "L0.1100[1686841379000000000,1686842249810810810] 1686932677.39s 72mb|------L0.1100-------| "
- - "L0.1101[1686842249810810811,1686842589641148927] 1686932677.39s 28mb |L0.1101| "
- - "L0.937[1686842589641148928,1686843120621621620] 1686932677.39s 44mb |--L0.937---| "
- - "L0.1102[1686843120621621621,1686843991432432430] 1686932677.39s 72mb |------L0.1102-------| "
- - "L0.1103[1686843991432432431,1686844331262770547] 1686932677.39s 28mb |L0.1103| "
- - "L0.942[1686844331262770548,1686844862243243240] 1686932677.39s 44mb |--L0.942---| "
- - "L1 "
- - "L1.84[1686841379000000000,1686842249810810810] 1686928854.57s 1mb|-------L1.84--------| "
- - "L1.85[1686842249810810811,1686843120621621620] 1686928854.57s 1mb |-------L1.85--------| "
- - "L1.86[1686843120621621621,1686843991432432430] 1686928854.57s 1mb |-------L1.86--------| "
- - "L1.87[1686843991432432431,1686844862243243240] 1686928854.57s 1mb |-------L1.87--------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 292mb total:"
- - "L1 "
- - "L1.?[1686841379000000000,1686842571022320444] 1686932677.39s 100mb|------------L1.?------------| "
- - "L1.?[1686842571022320445,1686843763044640888] 1686932677.39s 100mb |------------L1.?------------| "
- - "L1.?[1686843763044640889,1686844862243243240] 1686932677.39s 92mb |-----------L1.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 10 files: L1.84, L1.85, L1.86, L1.87, L0.937, L0.942, L0.1100, L0.1101, L0.1102, L0.1103"
- - " Creating 3 files"
- - "**** Simulation run 256, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842540081081080]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1337[1686841379000000000,1686842571022320444] 1686932677.39s|----------------------------------------L1.1337-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686841379000000000,1686842540081081080] 1686932677.39s 97mb|----------------------------------------L1.?-----------------------------------------| "
- - "L1.?[1686842540081081081,1686842571022320444] 1686932677.39s 3mb |L1.?|"
- - "**** Simulation run 257, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842540081081080]). 1 Input Files, 28mb total:"
- - "L0, all files 28mb "
- - "L0.1150[1686842249810810811,1686842593136179151] 1686935947.46s|----------------------------------------L0.1150-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 28mb total:"
- - "L0 "
- - "L0.?[1686842249810810811,1686842540081081080] 1686935947.46s 24mb|-----------------------------------L0.?-----------------------------------| "
- - "L0.?[1686842540081081081,1686842593136179151] 1686935947.46s 4mb |---L0.?----| "
- - "**** Simulation run 258, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842540081081080]). 1 Input Files, 3mb total:"
- - "L0, all files 3mb "
- - "L0.1185[1686842249810810811,1686843120621621620] 1686936871.55s|----------------------------------------L0.1185-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 3mb total:"
- - "L0 "
- - "L0.?[1686842249810810811,1686842540081081080] 1686936871.55s 962kb|-----------L0.?------------| "
- - "L0.?[1686842540081081081,1686843120621621620] 1686936871.55s 2mb |--------------------------L0.?---------------------------| "
- - "**** Simulation run 259, type=split(HighL0OverlapTotalBacklog)(split_times=[1686843701162162160]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1338[1686842571022320445,1686843763044640888] 1686932677.39s|----------------------------------------L1.1338-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686842571022320445,1686843701162162160] 1686932677.39s 95mb|---------------------------------------L1.?----------------------------------------| "
- - "L1.?[1686843701162162161,1686843763044640888] 1686932677.39s 5mb |L1.?|"
- - "**** Simulation run 260, type=split(HighL0OverlapTotalBacklog)(split_times=[1686843701162162160]). 1 Input Files, 72mb total:"
- - "L0, all files 72mb "
- - "L0.1151[1686843120621621621,1686843991432432430] 1686935947.46s|----------------------------------------L0.1151-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 72mb total:"
- - "L0 "
- - "L0.?[1686843120621621621,1686843701162162160] 1686935947.46s 48mb|--------------------------L0.?---------------------------| "
- - "L0.?[1686843701162162161,1686843991432432430] 1686935947.46s 24mb |-----------L0.?------------| "
- - "**** Simulation run 261, type=split(HighL0OverlapTotalBacklog)(split_times=[1686843701162162160]). 1 Input Files, 3mb total:"
- - "L0, all files 3mb "
- - "L0.1186[1686843120621621621,1686843991432432430] 1686936871.55s|----------------------------------------L0.1186-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 3mb total:"
- - "L0 "
- - "L0.?[1686843120621621621,1686843701162162160] 1686936871.55s 2mb|--------------------------L0.?---------------------------| "
- - "L0.?[1686843701162162161,1686843991432432430] 1686936871.55s 962kb |-----------L0.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 6 files: L0.1150, L0.1151, L0.1185, L0.1186, L1.1337, L1.1338"
- - " Creating 12 files"
- - "**** Simulation run 262, type=split(ReduceOverlap)(split_times=[1686842571022320444]). 1 Input Files, 4mb total:"
- - "L0, all files 4mb "
- - "L0.1343[1686842540081081081,1686842593136179151] 1686935947.46s|----------------------------------------L0.1343-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 4mb total:"
- - "L0 "
- - "L0.?[1686842540081081081,1686842571022320444] 1686935947.46s 3mb|-----------------------L0.?-----------------------| "
- - "L0.?[1686842571022320445,1686842593136179151] 1686935947.46s 2mb |---------------L0.?----------------| "
- - "**** Simulation run 263, type=split(ReduceOverlap)(split_times=[1686843763044640888]). 1 Input Files, 24mb total:"
- - "L0, all files 24mb "
- - "L0.1349[1686843701162162161,1686843991432432430] 1686935947.46s|----------------------------------------L0.1349-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 24mb total:"
- - "L0 "
- - "L0.?[1686843701162162161,1686843763044640888] 1686935947.46s 5mb|------L0.?-------| "
- - "L0.?[1686843763044640889,1686843991432432430] 1686935947.46s 19mb |--------------------------------L0.?--------------------------------| "
- - "**** Simulation run 264, type=split(ReduceOverlap)(split_times=[1686842571022320444]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1345[1686842540081081081,1686843120621621620] 1686936871.55s|----------------------------------------L0.1345-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[1686842540081081081,1686842571022320444] 1686936871.55s 103kb|L0.?| "
- - "L0.?[1686842571022320445,1686843120621621620] 1686936871.55s 2mb |---------------------------------------L0.?----------------------------------------| "
- - "**** Simulation run 265, type=split(ReduceOverlap)(split_times=[1686843763044640888]). 1 Input Files, 962kb total:"
- - "L0, all files 962kb "
- - "L0.1351[1686843701162162161,1686843991432432430] 1686936871.55s|----------------------------------------L0.1351-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 962kb total:"
- - "L0 "
- - "L0.?[1686843701162162161,1686843763044640888] 1686936871.55s 205kb|------L0.?-------| "
- - "L0.?[1686843763044640889,1686843991432432430] 1686936871.55s 757kb |--------------------------------L0.?--------------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L0.1343, L0.1345, L0.1349, L0.1351"
- - " Creating 8 files"
- - "**** Simulation run 266, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686846054770701976, 1686847247298160711]). 12 Input Files, 292mb total:"
- - "L0 "
- - "L0.1104[1686844862243243241,1686845579000000000] 1686932677.39s 59mb|----L0.1104-----| "
- - "L0.1105[1686845579000000001,1686845733054054050] 1686932677.39s 13mb |L0.1105| "
- - "L0.1106[1686845733054054051,1686846072884392167] 1686932677.39s 28mb |L0.1106| "
- - "L0.947[1686846072884392168,1686846603864864860] 1686932677.39s 44mb |--L0.947---| "
- - "L0.1107[1686846603864864861,1686847474675675670] 1686932677.39s 72mb |------L0.1107-------| "
- - "L0.1108[1686847474675675671,1686847814506013787] 1686932677.39s 28mb |L0.1108| "
- - "L0.951[1686847814506013788,1686848345486486480] 1686932677.39s 44mb |--L0.951---| "
- - "L1 "
- - "L1.88[1686844862243243241,1686845579000000000] 1686928854.57s 947kb|-----L1.88------| "
- - "L1.413[1686845639000000000,1686845733054054050] 1686928854.57s 123kb |L1.413| "
- - "L1.414[1686845733054054051,1686846603864864860] 1686928854.57s 1mb |-------L1.414-------| "
- - "L1.415[1686846603864864861,1686847474675675670] 1686928854.57s 1mb |-------L1.415-------| "
- - "L1.416[1686847474675675671,1686848345486486480] 1686928854.57s 1mb |-------L1.416-------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 292mb total:"
- - "L1 "
- - "L1.?[1686844862243243241,1686846054770701976] 1686932677.39s 100mb|------------L1.?------------| "
- - "L1.?[1686846054770701977,1686847247298160711] 1686932677.39s 100mb |------------L1.?------------| "
- - "L1.?[1686847247298160712,1686848345486486480] 1686932677.39s 92mb |-----------L1.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 12 files: L1.88, L1.413, L1.414, L1.415, L1.416, L0.947, L0.951, L0.1104, L0.1105, L0.1106, L0.1107, L0.1108"
- - " Creating 3 files"
- - "**** Simulation run 267, type=split(HighL0OverlapTotalBacklog)(split_times=[1686846023324324320]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1360[1686844862243243241,1686846054770701976] 1686932677.39s|----------------------------------------L1.1360-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686844862243243241,1686846023324324320] 1686932677.39s 97mb|----------------------------------------L1.?-----------------------------------------| "
- - "L1.?[1686846023324324321,1686846054770701976] 1686932677.39s 3mb |L1.?|"
- - "**** Simulation run 268, type=split(HighL0OverlapTotalBacklog)(split_times=[1686846023324324320]). 1 Input Files, 28mb total:"
- - "L0, all files 28mb "
- - "L0.1155[1686845733054054051,1686846076379422391] 1686935947.46s|----------------------------------------L0.1155-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 28mb total:"
- - "L0 "
- - "L0.?[1686845733054054051,1686846023324324320] 1686935947.46s 24mb|-----------------------------------L0.?-----------------------------------| "
- - "L0.?[1686846023324324321,1686846076379422391] 1686935947.46s 4mb |---L0.?----| "
- - "**** Simulation run 269, type=split(HighL0OverlapTotalBacklog)(split_times=[1686846023324324320]). 1 Input Files, 3mb total:"
- - "L0, all files 3mb "
- - "L0.1190[1686845733054054051,1686846603864864860] 1686936871.55s|----------------------------------------L0.1190-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 3mb total:"
- - "L0 "
- - "L0.?[1686845733054054051,1686846023324324320] 1686936871.55s 962kb|-----------L0.?------------| "
- - "L0.?[1686846023324324321,1686846603864864860] 1686936871.55s 2mb |--------------------------L0.?---------------------------| "
- - "**** Simulation run 270, type=split(HighL0OverlapTotalBacklog)(split_times=[1686847184405405399]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1361[1686846054770701977,1686847247298160711] 1686932677.39s|----------------------------------------L1.1361-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686846054770701977,1686847184405405399] 1686932677.39s 95mb|---------------------------------------L1.?----------------------------------------| "
- - "L1.?[1686847184405405400,1686847247298160711] 1686932677.39s 5mb |L1.?|"
- - "**** Simulation run 271, type=split(HighL0OverlapTotalBacklog)(split_times=[1686847184405405399]). 1 Input Files, 72mb total:"
- - "L0, all files 72mb "
- - "L0.1156[1686846603864864861,1686847474675675670] 1686935947.46s|----------------------------------------L0.1156-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 72mb total:"
- - "L0 "
- - "L0.?[1686846603864864861,1686847184405405399] 1686935947.46s 48mb|--------------------------L0.?---------------------------| "
- - "L0.?[1686847184405405400,1686847474675675670] 1686935947.46s 24mb |------------L0.?------------| "
- - "**** Simulation run 272, type=split(HighL0OverlapTotalBacklog)(split_times=[1686847184405405399]). 1 Input Files, 3mb total:"
- - "L0, all files 3mb "
- - "L0.1191[1686846603864864861,1686847474675675670] 1686936871.55s|----------------------------------------L0.1191-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 3mb total:"
- - "L0 "
- - "L0.?[1686846603864864861,1686847184405405399] 1686936871.55s 2mb|--------------------------L0.?---------------------------| "
- - "L0.?[1686847184405405400,1686847474675675670] 1686936871.55s 962kb |------------L0.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 6 files: L0.1155, L0.1156, L0.1190, L0.1191, L1.1360, L1.1361"
- - " Creating 12 files"
- - "**** Simulation run 273, type=split(ReduceOverlap)(split_times=[1686846054770701976]). 1 Input Files, 4mb total:"
- - "L0, all files 4mb "
- - "L0.1366[1686846023324324321,1686846076379422391] 1686935947.46s|----------------------------------------L0.1366-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 4mb total:"
- - "L0 "
- - "L0.?[1686846023324324321,1686846054770701976] 1686935947.46s 3mb|-----------------------L0.?------------------------| "
- - "L0.?[1686846054770701977,1686846076379422391] 1686935947.46s 2mb |---------------L0.?---------------| "
- - "**** Simulation run 274, type=split(ReduceOverlap)(split_times=[1686847247298160711]). 1 Input Files, 24mb total:"
- - "L0, all files 24mb "
- - "L0.1372[1686847184405405400,1686847474675675670] 1686935947.46s|----------------------------------------L0.1372-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 24mb total:"
- - "L0 "
- - "L0.?[1686847184405405400,1686847247298160711] 1686935947.46s 5mb|------L0.?-------| "
- - "L0.?[1686847247298160712,1686847474675675670] 1686935947.46s 19mb |--------------------------------L0.?--------------------------------| "
- - "**** Simulation run 275, type=split(ReduceOverlap)(split_times=[1686846054770701976]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1368[1686846023324324321,1686846603864864860] 1686936871.55s|----------------------------------------L0.1368-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[1686846023324324321,1686846054770701976] 1686936871.55s 104kb|L0.?| "
- - "L0.?[1686846054770701977,1686846603864864860] 1686936871.55s 2mb |---------------------------------------L0.?----------------------------------------| "
- - "**** Simulation run 276, type=split(ReduceOverlap)(split_times=[1686847247298160711]). 1 Input Files, 962kb total:"
- - "L0, all files 962kb "
- - "L0.1374[1686847184405405400,1686847474675675670] 1686936871.55s|----------------------------------------L0.1374-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 962kb total:"
- - "L0 "
- - "L0.?[1686847184405405400,1686847247298160711] 1686936871.55s 208kb|------L0.?-------| "
- - "L0.?[1686847247298160712,1686847474675675670] 1686936871.55s 753kb |--------------------------------L0.?--------------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L0.1366, L0.1368, L0.1372, L0.1374"
- - " Creating 8 files"
- - "**** Simulation run 277, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686849559289331160, 1686850773092175839]). 14 Input Files, 287mb total:"
- - "L0 "
- - "L0.1109[1686848345486486481,1686849216297297290] 1686932677.39s 69mb|------L0.1109-------| "
- - "L0.1110[1686849216297297291,1686849600239388909] 1686932677.39s 31mb |L0.1110| "
- - "L0.1111[1686849600239388910,1686849779000000000] 1686932677.39s 14mb |L0.1111| "
- - "L0.1112[1686849779000000001,1686850087108108100] 1686932677.39s 25mb |L0.1112| "
- - "L0.1113[1686850087108108101,1686850559000000000] 1686932677.39s 39mb |-L0.1113--| "
- - "L0.1114[1686850559000000001,1686850957918918910] 1686932677.39s 33mb |L0.1114-| "
- - "L0.1115[1686850957918918911,1686851297766913590] 1686932677.39s 28mb |L0.1115| "
- - "L0.961[1686851297766913591,1686851828729729720] 1686932677.39s 44mb |--L0.961---| "
- - "L1 "
- - "L1.417[1686848345486486481,1686849216297297290] 1686928854.57s 1mb|-------L1.417-------| "
- - "L1.418[1686849216297297291,1686849779000000000] 1686928854.57s 734kb |---L1.418---| "
- - "L1.430[1686849839000000000,1686850087108108100] 1686928854.57s 336kb |L1.430| "
- - "L1.431[1686850087108108101,1686850559000000000] 1686928854.57s 639kb |--L1.431--| "
- - "L1.432[1686850619000000000,1686850957918918910] 1686928854.57s 440kb |L1.432| "
- - "L1.433[1686850957918918911,1686851828729729720] 1686928854.57s 1mb |-------L1.433-------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 287mb total:"
- - "L1 "
- - "L1.?[1686848345486486481,1686849559289331160] 1686932677.39s 100mb|------------L1.?-------------| "
- - "L1.?[1686849559289331161,1686850773092175839] 1686932677.39s 100mb |------------L1.?-------------| "
- - "L1.?[1686850773092175840,1686851828729729720] 1686932677.39s 87mb |----------L1.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 14 files: L1.417, L1.418, L1.430, L1.431, L1.432, L1.433, L0.961, L0.1109, L0.1110, L0.1111, L0.1112, L0.1113, L0.1114, L0.1115"
- - " Creating 3 files"
- - "**** Simulation run 278, type=split(HighL0OverlapTotalBacklog)(split_times=[1686849506567567560]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1383[1686848345486486481,1686849559289331160] 1686932677.39s|----------------------------------------L1.1383-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686848345486486481,1686849506567567560] 1686932677.39s 96mb|----------------------------------------L1.?----------------------------------------| "
- - "L1.?[1686849506567567561,1686849559289331160] 1686932677.39s 4mb |L1.?|"
- - "**** Simulation run 279, type=split(HighL0OverlapTotalBacklog)(split_times=[1686849506567567560]). 1 Input Files, 29mb total:"
- - "L0, all files 29mb "
- - "L0.1159[1686849216297297291,1686849568759166090] 1686935947.46s|----------------------------------------L0.1159-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 29mb total:"
- - "L0 "
- - "L0.?[1686849216297297291,1686849506567567560] 1686935947.46s 24mb|----------------------------------L0.?----------------------------------| "
- - "L0.?[1686849506567567561,1686849568759166090] 1686935947.46s 5mb |----L0.?-----| "
- - "**** Simulation run 280, type=split(HighL0OverlapTotalBacklog)(split_times=[1686849506567567560]). 1 Input Files, 4mb total:"
- - "L0, all files 4mb "
- - "L0.1194[1686849216297297291,1686849779000000000] 1686936871.55s|----------------------------------------L0.1194-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 4mb total:"
- - "L0 "
- - "L0.?[1686849216297297291,1686849506567567560] 1686936871.55s 2mb|--------------------L0.?--------------------| "
- - "L0.?[1686849506567567561,1686849779000000000] 1686936871.55s 2mb |------------------L0.?-------------------| "
- - "**** Simulation run 281, type=split(HighL0OverlapTotalBacklog)(split_times=[1686850667648648639]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1384[1686849559289331161,1686850773092175839] 1686932677.39s|----------------------------------------L1.1384-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686849559289331161,1686850667648648639] 1686932677.39s 91mb|--------------------------------------L1.?--------------------------------------| "
- - "L1.?[1686850667648648640,1686850773092175839] 1686932677.39s 9mb |L1.?-| "
- - "**** Simulation run 282, type=split(HighL0OverlapTotalBacklog)(split_times=[1686850667648648639]). 1 Input Files, 33mb total:"
- - "L0, all files 33mb "
- - "L0.1163[1686850559000000001,1686850957918918910] 1686935947.46s|----------------------------------------L0.1163-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 33mb total:"
- - "L0 "
- - "L0.?[1686850559000000001,1686850667648648639] 1686935947.46s 9mb|---------L0.?---------| "
- - "L0.?[1686850667648648640,1686850957918918910] 1686935947.46s 24mb |-----------------------------L0.?------------------------------| "
- - "**** Simulation run 283, type=split(HighL0OverlapTotalBacklog)(split_times=[1686850667648648639]). 1 Input Files, 1mb total:"
- - "L0, all files 1mb "
- - "L0.1197[1686850559000000001,1686850957918918910] 1686936871.55s|----------------------------------------L0.1197-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L0 "
- - "L0.?[1686850559000000001,1686850667648648639] 1686936871.55s 360kb|---------L0.?---------| "
- - "L0.?[1686850667648648640,1686850957918918910] 1686936871.55s 962kb |-----------------------------L0.?------------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 6 files: L0.1159, L0.1163, L0.1194, L0.1197, L1.1383, L1.1384"
- - " Creating 12 files"
- - "**** Simulation run 284, type=split(ReduceOverlap)(split_times=[1686849559289331160]). 1 Input Files, 5mb total:"
- - "L0, all files 5mb "
- - "L0.1389[1686849506567567561,1686849568759166090] 1686935947.46s|----------------------------------------L0.1389-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 5mb total:"
- - "L0 "
- - "L0.?[1686849506567567561,1686849559289331160] 1686935947.46s 4mb|-----------------------------------L0.?-----------------------------------| "
- - "L0.?[1686849559289331161,1686849568759166090] 1686935947.46s 793kb |---L0.?----| "
- - "**** Simulation run 285, type=split(ReduceOverlap)(split_times=[1686850773092175839]). 1 Input Files, 24mb total:"
- - "L0, all files 24mb "
- - "L0.1395[1686850667648648640,1686850957918918910] 1686935947.46s|----------------------------------------L0.1395-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 24mb total:"
- - "L0 "
- - "L0.?[1686850667648648640,1686850773092175839] 1686935947.46s 9mb|-------------L0.?-------------| "
- - "L0.?[1686850773092175840,1686850957918918910] 1686935947.46s 15mb |-------------------------L0.?--------------------------| "
- - "**** Simulation run 286, type=split(ReduceOverlap)(split_times=[1686849559289331160]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1391[1686849506567567561,1686849779000000000] 1686936871.55s|----------------------------------------L0.1391-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[1686849506567567561,1686849559289331160] 1686936871.55s 342kb|-----L0.?------| "
- - "L0.?[1686849559289331161,1686849779000000000] 1686936871.55s 1mb |---------------------------------L0.?---------------------------------| "
- - "**** Simulation run 287, type=split(ReduceOverlap)(split_times=[1686850773092175839]). 1 Input Files, 962kb total:"
- - "L0, all files 962kb "
- - "L0.1397[1686850667648648640,1686850957918918910] 1686936871.55s|----------------------------------------L0.1397-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 962kb total:"
- - "L0 "
- - "L0.?[1686850667648648640,1686850773092175839] 1686936871.55s 349kb|-------------L0.?-------------| "
- - "L0.?[1686850773092175840,1686850957918918910] 1686936871.55s 612kb |-------------------------L0.?--------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L0.1389, L0.1391, L0.1395, L0.1397"
- - " Creating 8 files"
- - "**** Simulation run 288, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686854034336087198, 1686855311077579811]). 14 Input Files, 291mb total:"
- - "L0 "
- - "L0.1241[1686853570351351341,1686853686459459447] 1686932677.39s 9mb |L0.1241| "
- - "L0.1242[1686853686459459448,1686854441162162150] 1686932677.39s 60mb |----L0.1242-----| "
- - "L0.1247[1686854441162162151,1686854615324324310] 1686932677.39s 14mb |L0.1247| "
- - "L0.1248[1686854615324324311,1686854819000000000] 1686932677.39s 16mb |L0.1248| "
- - "L0.1118[1686854819000000001,1686854830189955965] 1686932677.39s 910kb |L0.1118| "
- - "L0.973[1686854830189955966,1686855311972972960] 1686932677.39s 38mb |-L0.973--| "
- - "L0.1253[1686855311972972961,1686855544189189173] 1686932677.39s 19mb |L0.1253| "
- - "L0.1254[1686855544189189174,1686855892513513500] 1686932677.39s 28mb |L0.1254| "
- - "L0.1119[1686855892513513501,1686856182783783770] 1686932677.39s 24mb |L0.1119| "
- - "L0.1259[1686856182783783771,1686856473054054036] 1686932677.39s 24mb |L0.1259|"
- - "L1 "
- - "L1.1231[1686852757594594585,1686853686459459447] 1686931893.7s 14mb|------L1.1231-------| "
- - "L1.1232[1686853686459459448,1686854615324324310] 1686931893.7s 14mb |------L1.1232-------| "
- - "L1.1233[1686854615324324311,1686855544189189173] 1686931893.7s 14mb |------L1.1233-------| "
- - "L1.1234[1686855544189189174,1686856473054054036] 1686931893.7s 14mb |------L1.1234-------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 291mb total:"
- - "L1 "
- - "L1.?[1686852757594594585,1686854034336087198] 1686932677.39s 100mb|------------L1.?------------| "
- - "L1.?[1686854034336087199,1686855311077579811] 1686932677.39s 100mb |------------L1.?------------| "
- - "L1.?[1686855311077579812,1686856473054054036] 1686932677.39s 91mb |-----------L1.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 14 files: L0.973, L0.1118, L0.1119, L1.1231, L1.1232, L1.1233, L1.1234, L0.1241, L0.1242, L0.1247, L0.1248, L0.1253, L0.1254, L0.1259"
- - " Creating 3 files"
- - "**** Simulation run 289, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853500686486475]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1406[1686852757594594585,1686854034336087198] 1686932677.39s|----------------------------------------L1.1406-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686852757594594585,1686853500686486475] 1686932677.39s 58mb|-----------------------L1.?-----------------------| "
- - "L1.?[1686853500686486476,1686854034336087198] 1686932677.39s 42mb |---------------L1.?----------------| "
- - "**** Simulation run 290, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853500686486475]). 1 Input Files, 14mb total:"
- - "L0, all files 14mb "
- - "L0.971[1686853222027027017,1686853570351351340] 1686936871.55s|-----------------------------------------L0.971-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 14mb total:"
- - "L0 "
- - "L0.?[1686853222027027017,1686853500686486475] 1686936871.55s 11mb|--------------------------------L0.?---------------------------------| "
- - "L0.?[1686853500686486476,1686853570351351340] 1686936871.55s 3mb |-----L0.?------| "
- - "**** Simulation run 291, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853500686486475]). 1 Input Files, 24mb total:"
- - "L0, all files 24mb "
- - "L0.969[1686853236700974399,1686853570351351340] 1686934966.48s|-----------------------------------------L0.969-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 24mb total:"
- - "L0 "
- - "L0.?[1686853236700974399,1686853500686486475] 1686934966.48s 19mb|--------------------------------L0.?---------------------------------| "
- - "L0.?[1686853500686486476,1686853570351351340] 1686934966.48s 5mb |------L0.?------| "
- - "**** Simulation run 292, type=split(HighL0OverlapTotalBacklog)(split_times=[1686854243778378365]). 1 Input Files, 51mb total:"
- - "L0, all files 51mb "
- - "L0.1244[1686853686459459448,1686854441162162150] 1686935546.05s|----------------------------------------L0.1244-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 51mb total:"
- - "L0 "
- - "L0.?[1686853686459459448,1686854243778378365] 1686935546.05s 38mb|------------------------------L0.?------------------------------| "
- - "L0.?[1686854243778378366,1686854441162162150] 1686935546.05s 13mb |--------L0.?---------| "
- - "**** Simulation run 293, type=split(HighL0OverlapTotalBacklog)(split_times=[1686854243778378365]). 1 Input Files, 16mb total:"
- - "L0, all files 16mb "
- - "L0.1246[1686853686459459448,1686854441162162150] 1686936871.55s|----------------------------------------L0.1246-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 16mb total:"
- - "L0 "
- - "L0.?[1686853686459459448,1686854243778378365] 1686936871.55s 12mb|------------------------------L0.?------------------------------| "
- - "L0.?[1686854243778378366,1686854441162162150] 1686936871.55s 4mb |--------L0.?---------| "
- - "**** Simulation run 294, type=split(HighL0OverlapTotalBacklog)(split_times=[1686854243778378365, 1686854986870270255]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1407[1686854034336087199,1686855311077579811] 1686932677.39s|----------------------------------------L1.1407-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686854034336087199,1686854243778378365] 1686932677.39s 16mb|----L1.?----| "
- - "L1.?[1686854243778378366,1686854986870270255] 1686932677.39s 58mb |-----------------------L1.?-----------------------| "
- - "L1.?[1686854986870270256,1686855311077579811] 1686932677.39s 25mb |--------L1.?--------| "
- - "**** Simulation run 295, type=split(HighL0OverlapTotalBacklog)(split_times=[1686854986870270255]). 1 Input Files, 24mb total:"
- - "L0, all files 24mb "
- - "L0.975[1686854963648648637,1686855311972972960] 1686935546.05s|-----------------------------------------L0.975-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 24mb total:"
- - "L0 "
- - "L0.?[1686854963648648637,1686854986870270255] 1686935546.05s 2mb|L0.?| "
- - "L0.?[1686854986870270256,1686855311972972960] 1686935546.05s 22mb |---------------------------------------L0.?---------------------------------------| "
- - "**** Simulation run 296, type=split(HighL0OverlapTotalBacklog)(split_times=[1686854986870270255]). 1 Input Files, 7mb total:"
- - "L0, all files 7mb "
- - "L0.977[1686854963648648637,1686855311972972960] 1686936871.55s|-----------------------------------------L0.977-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 7mb total:"
- - "L0 "
- - "L0.?[1686854963648648637,1686854986870270255] 1686936871.55s 499kb|L0.?| "
- - "L0.?[1686854986870270256,1686855311972972960] 1686936871.55s 7mb |---------------------------------------L0.?---------------------------------------| "
- - "**** Simulation run 297, type=split(HighL0OverlapTotalBacklog)(split_times=[1686855729962162145]). 1 Input Files, 91mb total:"
- - "L1, all files 91mb "
- - "L1.1408[1686855311077579812,1686856473054054036] 1686932677.39s|----------------------------------------L1.1408-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 91mb total:"
- - "L1 "
- - "L1.?[1686855311077579812,1686855729962162145] 1686932677.39s 33mb|-------------L1.?-------------| "
- - "L1.?[1686855729962162146,1686856473054054036] 1686932677.39s 58mb |-------------------------L1.?--------------------------| "
- - "**** Simulation run 298, type=split(HighL0OverlapTotalBacklog)(split_times=[1686855729962162145]). 1 Input Files, 28mb total:"
- - "L0, all files 28mb "
- - "L0.1256[1686855544189189174,1686855892513513500] 1686935742.51s|----------------------------------------L0.1256-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 28mb total:"
- - "L0 "
- - "L0.?[1686855544189189174,1686855729962162145] 1686935742.51s 15mb|--------------------L0.?---------------------| "
- - "L0.?[1686855729962162146,1686855892513513500] 1686935742.51s 13mb |------------------L0.?------------------| "
- - "**** Simulation run 299, type=split(HighL0OverlapTotalBacklog)(split_times=[1686855729962162145]). 1 Input Files, 3mb total:"
- - "L0, all files 3mb "
- - "L0.1258[1686855544189189174,1686855892513513500] 1686936871.55s|----------------------------------------L0.1258-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 3mb total:"
- - "L0 "
- - "L0.?[1686855544189189174,1686855729962162145] 1686936871.55s 2mb|--------------------L0.?---------------------| "
- - "L0.?[1686855729962162146,1686855892513513500] 1686936871.55s 2mb |------------------L0.?------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 11 files: L0.969, L0.971, L0.975, L0.977, L0.1244, L0.1246, L0.1256, L0.1258, L1.1406, L1.1407, L1.1408"
- - " Creating 23 files"
- - "**** Simulation run 300, type=split(ReduceOverlap)(split_times=[1686854034336087198]). 1 Input Files, 38mb total:"
- - "L0, all files 38mb "
- - "L0.1415[1686853686459459448,1686854243778378365] 1686935546.05s|----------------------------------------L0.1415-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 38mb total:"
- - "L0 "
- - "L0.?[1686853686459459448,1686854034336087198] 1686935546.05s 23mb|-------------------------L0.?-------------------------| "
- - "L0.?[1686854034336087199,1686854243778378365] 1686935546.05s 14mb |-------------L0.?--------------| "
- - "**** Simulation run 301, type=split(ReduceOverlap)(split_times=[1686855311077579811]). 1 Input Files, 22mb total:"
- - "L0, all files 22mb "
- - "L0.1423[1686854986870270256,1686855311972972960] 1686935546.05s|----------------------------------------L0.1423-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 22mb total:"
- - "L0 "
- - "L0.?[1686854986870270256,1686855311077579811] 1686935546.05s 22mb|-----------------------------------------L0.?------------------------------------------| "
- - "L0.?[1686855311077579812,1686855311972972960] 1686935546.05s 62kb |L0.?|"
- - "**** Simulation run 302, type=split(ReduceOverlap)(split_times=[1686854034336087198]). 1 Input Files, 12mb total:"
- - "L0, all files 12mb "
- - "L0.1417[1686853686459459448,1686854243778378365] 1686936871.55s|----------------------------------------L0.1417-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 12mb total:"
- - "L0 "
- - "L0.?[1686853686459459448,1686854034336087198] 1686936871.55s 7mb|-------------------------L0.?-------------------------| "
- - "L0.?[1686854034336087199,1686854243778378365] 1686936871.55s 4mb |-------------L0.?--------------| "
- - "**** Simulation run 303, type=split(ReduceOverlap)(split_times=[1686855311077579811]). 1 Input Files, 7mb total:"
- - "L0, all files 7mb "
- - "L0.1425[1686854986870270256,1686855311972972960] 1686936871.55s|----------------------------------------L0.1425-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 7mb total:"
- - "L0 "
- - "L0.?[1686854986870270256,1686855311077579811] 1686936871.55s 7mb|-----------------------------------------L0.?------------------------------------------| "
- - "L0.?[1686855311077579812,1686855311972972960] 1686936871.55s 19kb |L0.?|"
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L0.1415, L0.1417, L0.1423, L0.1425"
- - " Creating 8 files"
- - "**** Simulation run 304, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686857724225846697, 1686858975397639357]). 19 Input Files, 297mb total:"
- - "L0 "
- - "L0.1260[1686856473054054037,1686856473054054039] 1686932677.39s 1b|L0.1260| "
- - "L0.1035[1686856473054054040,1686856536496842959] 1686932677.39s 5mb|L0.1035| "
- - "L0.979[1686856536496842960,1686857053594594580] 1686932677.39s 42mb |--L0.979--| "
- - "L0.1265[1686857053594594581,1686857401918918899] 1686932677.39s 28mb |L0.1265| "
- - "L0.1266[1686857401918918900,1686857634135135120] 1686932677.39s 19mb |L0.1266| "
- - "L0.1121[1686857634135135121,1686857924405405390] 1686932677.39s 24mb |L0.1121| "
- - "L0.1122[1686857924405405391,1686858214675675659] 1686932677.39s 24mb |L0.1122| "
- - "L0.1327[1686858214675675660,1686858240168622590] 1686932677.39s 2mb |L0.1327| "
- - "L0.1328[1686858240168622591,1686858278525917085] 1686932677.39s 3mb |L0.1328| "
- - "L0.1283[1686858278525917086,1686858330783783762] 1686932677.39s 4mb |L0.1283| "
- - "L0.1284[1686858330783783763,1686858795216216200] 1686932677.39s 38mb |-L0.1284-| "
- - "L0.1123[1686858795216216201,1686859019000000000] 1686932677.39s 19mb |L0.1123| "
- - "L0.1285[1686859019000000001,1686859259648648625] 1686932677.39s 20mb |L0.1285| "
- - "L0.1286[1686859259648648626,1686859375756756740] 1686932677.39s 10mb |L0.1286| "
- - "L1 "
- - "L1.1235[1686856473054054037,1686857401918918899] 1686931893.7s 14mb|------L1.1235-------| "
- - "L1.1236[1686857401918918900,1686858240168622590] 1686931893.7s 13mb |-----L1.1236------| "
- - "L1.1273[1686858240168622591,1686858330783783762] 1686931893.7s 1mb |L1.1273| "
- - "L1.1274[1686858330783783763,1686859259648648625] 1686931893.7s 14mb |------L1.1274-------| "
- - "L1.1275[1686859259648648626,1686860188513513488] 1686931893.7s 14mb |------L1.1275-------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 297mb total:"
- - "L1 "
- - "L1.?[1686856473054054037,1686857724225846697] 1686932677.39s 100mb|------------L1.?------------| "
- - "L1.?[1686857724225846698,1686858975397639357] 1686932677.39s 100mb |------------L1.?------------| "
- - "L1.?[1686858975397639358,1686860188513513488] 1686932677.39s 97mb |-----------L1.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 19 files: L0.979, L0.1035, L0.1121, L0.1122, L0.1123, L1.1235, L1.1236, L0.1260, L0.1265, L0.1266, L1.1273, L1.1274, L1.1275, L0.1283, L0.1284, L0.1285, L0.1286, L0.1327, L0.1328"
- - " Creating 3 files"
- - "**** Simulation run 305, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857216145945927]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1440[1686856473054054037,1686857724225846697] 1686932677.39s|----------------------------------------L1.1440-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686856473054054037,1686857216145945927] 1686932677.39s 59mb|-----------------------L1.?------------------------| "
- - "L1.?[1686857216145945928,1686857724225846697] 1686932677.39s 41mb |---------------L1.?---------------| "
- - "**** Simulation run 306, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857216145945927]). 1 Input Files, 29mb total:"
- - "L0, all files 29mb "
- - "L0.1267[1686857053594594581,1686857401918918899] 1686935947.46s|----------------------------------------L0.1267-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 29mb total:"
- - "L0 "
- - "L0.?[1686857053594594581,1686857216145945927] 1686935947.46s 14mb|-----------------L0.?------------------| "
- - "L0.?[1686857216145945928,1686857401918918899] 1686935947.46s 16mb |---------------------L0.?---------------------| "
- - "**** Simulation run 307, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857216145945927]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1269[1686857053594594581,1686857401918918899] 1686936871.55s|----------------------------------------L0.1269-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[1686857053594594581,1686857216145945927] 1686936871.55s 1mb|-----------------L0.?------------------| "
- - "L0.?[1686857216145945928,1686857401918918899] 1686936871.55s 1mb |---------------------L0.?---------------------| "
- - "**** Simulation run 308, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857959237837817, 1686858702329729707]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1441[1686857724225846698,1686858975397639357] 1686932677.39s|----------------------------------------L1.1441-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686857724225846698,1686857959237837817] 1686932677.39s 19mb|-----L1.?-----| "
- - "L1.?[1686857959237837818,1686858702329729707] 1686932677.39s 59mb |-----------------------L1.?------------------------| "
- - "L1.?[1686858702329729708,1686858975397639357] 1686932677.39s 22mb |------L1.?-------| "
- - "**** Simulation run 309, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857959237837817]). 1 Input Files, 24mb total:"
- - "L0, all files 24mb "
- - "L0.1166[1686857924405405391,1686858214675675659] 1686935947.46s|----------------------------------------L0.1166-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 24mb total:"
- - "L0 "
- - "L0.?[1686857924405405391,1686857959237837817] 1686935947.46s 3mb|--L0.?--| "
- - "L0.?[1686857959237837818,1686858214675675659] 1686935947.46s 21mb |------------------------------------L0.?-------------------------------------| "
- - "**** Simulation run 310, type=split(HighL0OverlapTotalBacklog)(split_times=[1686858702329729707]). 1 Input Files, 3mb total:"
- - "L0, all files 3mb "
- - "L0.1272[1686858330783783763,1686858795216216200] 1686936871.55s|----------------------------------------L0.1272-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 3mb total:"
- - "L0 "
- - "L0.?[1686858330783783763,1686858702329729707] 1686936871.55s 2mb|--------------------------------L0.?---------------------------------| "
- - "L0.?[1686858702329729708,1686858795216216200] 1686936871.55s 626kb |------L0.?------| "
- - "**** Simulation run 311, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859445421621597]). 1 Input Files, 97mb total:"
- - "L1, all files 97mb "
- - "L1.1442[1686858975397639358,1686860188513513488] 1686932677.39s|----------------------------------------L1.1442-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 97mb total:"
- - "L1 "
- - "L1.?[1686858975397639358,1686859445421621597] 1686932677.39s 38mb|--------------L1.?--------------| "
- - "L1.?[1686859445421621598,1686860188513513488] 1686932677.39s 59mb |------------------------L1.?-------------------------| "
- - "**** Simulation run 312, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859445421621597]). 1 Input Files, 10mb total:"
- - "L0, all files 10mb "
- - "L0.1125[1686859375756756741,1686859499000000000] 1686932677.39s|----------------------------------------L0.1125----------------------------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:"
- - "L0 "
- - "L0.?[1686859375756756741,1686859445421621597] 1686932677.39s 6mb|----------------------L0.?----------------------| "
- - "L0.?[1686859445421621598,1686859499000000000] 1686932677.39s 5mb |----------------L0.?-----------------| "
- - "**** Simulation run 313, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859445421621597]). 1 Input Files, 10mb total:"
- - "L0, all files 10mb "
- - "L0.1169[1686859375756756741,1686859499000000000] 1686935947.46s|----------------------------------------L0.1169----------------------------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:"
- - "L0 "
- - "L0.?[1686859375756756741,1686859445421621597] 1686935947.46s 6mb|----------------------L0.?----------------------| "
- - "L0.?[1686859445421621598,1686859499000000000] 1686935947.46s 4mb |----------------L0.?-----------------| "
- - "**** Simulation run 314, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859445421621597]). 1 Input Files, 408kb total:"
- - "L0, all files 408kb "
- - "L0.1210[1686859375756756741,1686859499000000000] 1686936871.55s|----------------------------------------L0.1210----------------------------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 408kb total:"
- - "L0 "
- - "L0.?[1686859375756756741,1686859445421621597] 1686936871.55s 231kb|----------------------L0.?----------------------| "
- - "L0.?[1686859445421621598,1686859499000000000] 1686936871.55s 178kb |----------------L0.?-----------------| "
- - "**** Simulation run 315, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857959237837817]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1207[1686857924405405391,1686858214675675659] 1686936871.55s|----------------------------------------L0.1207-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[1686857924405405391,1686857959237837817] 1686936871.55s 235kb|--L0.?--| "
- - "L0.?[1686857959237837818,1686858214675675659] 1686936871.55s 2mb |------------------------------------L0.?-------------------------------------| "
- - "**** Simulation run 316, type=split(HighL0OverlapTotalBacklog)(split_times=[1686858702329729707]). 1 Input Files, 39mb total:"
- - "L0, all files 39mb "
- - "L0.1282[1686858330783783763,1686858795216216200] 1686935947.46s|----------------------------------------L0.1282-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 39mb total:"
- - "L0 "
- - "L0.?[1686858330783783763,1686858702329729707] 1686935947.46s 31mb|--------------------------------L0.?---------------------------------| "
- - "L0.?[1686858702329729708,1686858795216216200] 1686935947.46s 8mb |------L0.?------| "
- - "Committing partition 1:"
- - " Soft Deleting 12 files: L0.1125, L0.1166, L0.1169, L0.1207, L0.1210, L0.1267, L0.1269, L0.1272, L0.1282, L1.1440, L1.1441, L1.1442"
- - " Creating 25 files"
- - "**** Simulation run 317, type=split(ReduceOverlap)(split_times=[1686857724225846697]). 1 Input Files, 24mb total:"
- - "L0, all files 24mb "
- - "L0.1165[1686857634135135121,1686857924405405390] 1686935947.46s|----------------------------------------L0.1165-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 24mb total:"
- - "L0 "
- - "L0.?[1686857634135135121,1686857724225846697] 1686935947.46s 8mb|----------L0.?-----------| "
- - "L0.?[1686857724225846698,1686857924405405390] 1686935947.46s 17mb |----------------------------L0.?----------------------------| "
- - "**** Simulation run 318, type=split(ReduceOverlap)(split_times=[1686858975397639357]). 1 Input Files, 19mb total:"
- - "L0, all files 19mb "
- - "L0.1167[1686858795216216201,1686859019000000000] 1686935947.46s|----------------------------------------L0.1167-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 19mb total:"
- - "L0 "
- - "L0.?[1686858795216216201,1686858975397639357] 1686935947.46s 15mb|---------------------------------L0.?---------------------------------| "
- - "L0.?[1686858975397639358,1686859019000000000] 1686935947.46s 4mb |-----L0.?------| "
- - "**** Simulation run 319, type=split(ReduceOverlap)(split_times=[1686857724225846697]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1206[1686857634135135121,1686857924405405390] 1686936871.55s|----------------------------------------L0.1206-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[1686857634135135121,1686857724225846697] 1686936871.55s 607kb|----------L0.?-----------| "
- - "L0.?[1686857724225846698,1686857924405405390] 1686936871.55s 1mb |----------------------------L0.?----------------------------| "
- - "**** Simulation run 320, type=split(ReduceOverlap)(split_times=[1686858975397639357]). 1 Input Files, 741kb total:"
- - "L0, all files 741kb "
- - "L0.1208[1686858795216216201,1686859019000000000] 1686936871.55s|----------------------------------------L0.1208-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 741kb total:"
- - "L0 "
- - "L0.?[1686858795216216201,1686858975397639357] 1686936871.55s 597kb|---------------------------------L0.?---------------------------------| "
- - "L0.?[1686858975397639358,1686859019000000000] 1686936871.55s 144kb |-----L0.?------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L0.1165, L0.1167, L0.1206, L0.1208"
- - " Creating 8 files"
- - "**** Simulation run 321, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686860002800686531, 1686861030203733704]). 14 Input Files, 299mb total:"
- - "L0 "
- - "L0.1458[1686859375756756741,1686859445421621597] 1686932677.39s 6mb |L0.1458| "
- - "L0.1459[1686859445421621598,1686859499000000000] 1686932677.39s 5mb |L0.1459| "
- - "L0.1126[1686859499000000001,1686859666027027010] 1686932677.39s 14mb |L0.1126| "
- - "L0.1127[1686859666027027011,1686859956297297279] 1686932677.39s 25mb |L0.1127| "
- - "L0.1059[1686859956297297280,1686859969989511638] 1686932677.39s 1mb |L0.1059| "
- - "L0.1293[1686859969989511639,1686860188513513488] 1686932677.39s 19mb |L0.1293| "
- - "L0.1294[1686860188513513489,1686860536837837820] 1686932677.39s 30mb |L0.1294-| "
- - "L0.1297[1686860536837837821,1686861117378378351] 1686932677.39s 49mb |----L0.1297----| "
- - "L0.1298[1686861117378378352,1686861117378378360] 1686932677.39s 1b |L0.1298| "
- - "L0.1128[1686861117378378361,1686861407648648630] 1686932677.39s 25mb |L0.1128| "
- - "L1 "
- - "L1.1456[1686858975397639358,1686859445421621597] 1686932677.39s 38mb|--L1.1456--| "
- - "L1.1457[1686859445421621598,1686860188513513488] 1686932677.39s 59mb |------L1.1457------| "
- - "L1.1276[1686860188513513489,1686861117378378351] 1686931893.7s 14mb |---------L1.1276---------| "
- - "L1.1277[1686861117378378352,1686862046243243214] 1686931893.7s 14mb |---------L1.1277---------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 299mb total:"
- - "L1 "
- - "L1.?[1686858975397639358,1686860002800686531] 1686932677.39s 100mb|------------L1.?------------| "
- - "L1.?[1686860002800686532,1686861030203733704] 1686932677.39s 100mb |------------L1.?------------| "
- - "L1.?[1686861030203733705,1686862046243243214] 1686932677.39s 99mb |-----------L1.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 14 files: L0.1059, L0.1126, L0.1127, L0.1128, L1.1276, L1.1277, L0.1293, L0.1294, L0.1297, L0.1298, L1.1456, L1.1457, L0.1458, L0.1459"
- - " Creating 3 files"
- - "**** Simulation run 322, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859589566760129]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1476[1686858975397639358,1686860002800686531] 1686932677.39s|----------------------------------------L1.1476-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686858975397639358,1686859589566760129] 1686932677.39s 60mb|-----------------------L1.?------------------------| "
- - "L1.?[1686859589566760130,1686860002800686531] 1686932677.39s 40mb |---------------L1.?---------------| "
- - "**** Simulation run 323, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859589566760129]). 1 Input Files, 14mb total:"
- - "L0, all files 14mb "
- - "L0.1170[1686859499000000001,1686859666027027010] 1686935947.46s|----------------------------------------L0.1170-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 14mb total:"
- - "L0 "
- - "L0.?[1686859499000000001,1686859589566760129] 1686935947.46s 8mb|---------------------L0.?---------------------| "
- - "L0.?[1686859589566760130,1686859666027027010] 1686935947.46s 6mb |-----------------L0.?------------------| "
- - "**** Simulation run 324, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859589566760129]). 1 Input Files, 553kb total:"
- - "L0, all files 553kb "
- - "L0.1211[1686859499000000001,1686859666027027010] 1686936871.55s|----------------------------------------L0.1211-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 553kb total:"
- - "L0 "
- - "L0.?[1686859499000000001,1686859589566760129] 1686936871.55s 300kb|---------------------L0.?---------------------| "
- - "L0.?[1686859589566760130,1686859666027027010] 1686936871.55s 253kb |-----------------L0.?------------------| "
- - "**** Simulation run 325, type=split(HighL0OverlapTotalBacklog)(split_times=[1686860203735880900, 1686860817905001671]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1477[1686860002800686532,1686861030203733704] 1686932677.39s|----------------------------------------L1.1477-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686860002800686532,1686860203735880900] 1686932677.39s 20mb|-----L1.?------| "
- - "L1.?[1686860203735880901,1686860817905001671] 1686932677.39s 60mb |-----------------------L1.?------------------------| "
- - "L1.?[1686860817905001672,1686861030203733704] 1686932677.39s 21mb |------L1.?------| "
- - "**** Simulation run 326, type=split(HighL0OverlapTotalBacklog)(split_times=[1686860203735880900]). 1 Input Files, 29mb total:"
- - "L0, all files 29mb "
- - "L0.1296[1686860188513513489,1686860536837837820] 1686935947.46s|----------------------------------------L0.1296-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 29mb total:"
- - "L0 "
- - "L0.?[1686860188513513489,1686860203735880900] 1686935947.46s 1mb|L0.?| "
- - "L0.?[1686860203735880901,1686860536837837820] 1686935947.46s 28mb |----------------------------------------L0.?----------------------------------------| "
- - "**** Simulation run 327, type=split(HighL0OverlapTotalBacklog)(split_times=[1686860203735880900]). 1 Input Files, 1mb total:"
- - "L0, all files 1mb "
- - "L0.1292[1686860188513513489,1686860536837837820] 1686936871.55s|----------------------------------------L0.1292-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:"
- - "L0 "
- - "L0.?[1686860188513513489,1686860203735880900] 1686936871.55s 50kb|L0.?| "
- - "L0.?[1686860203735880901,1686860536837837820] 1686936871.55s 1mb |----------------------------------------L0.?----------------------------------------| "
- - "**** Simulation run 328, type=split(HighL0OverlapTotalBacklog)(split_times=[1686860817905001671]). 1 Input Files, 49mb total:"
- - "L0, all files 49mb "
- - "L0.1299[1686860536837837821,1686861117378378351] 1686935947.46s|----------------------------------------L0.1299-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 49mb total:"
- - "L0 "
- - "L0.?[1686860536837837821,1686860817905001671] 1686935947.46s 24mb|------------------L0.?-------------------| "
- - "L0.?[1686860817905001672,1686861117378378351] 1686935947.46s 25mb |--------------------L0.?--------------------| "
- - "**** Simulation run 329, type=split(HighL0OverlapTotalBacklog)(split_times=[1686860817905001671]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1301[1686860536837837821,1686861117378378351] 1686936871.55s|----------------------------------------L0.1301-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[1686860536837837821,1686860817905001671] 1686936871.55s 931kb|------------------L0.?-------------------| "
- - "L0.?[1686860817905001672,1686861117378378351] 1686936871.55s 992kb |--------------------L0.?--------------------| "
- - "**** Simulation run 330, type=split(HighL0OverlapTotalBacklog)(split_times=[1686861432074122442]). 1 Input Files, 99mb total:"
- - "L1, all files 99mb "
- - "L1.1478[1686861030203733705,1686862046243243214] 1686932677.39s|----------------------------------------L1.1478-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 99mb total:"
- - "L1 "
- - "L1.?[1686861030203733705,1686861432074122442] 1686932677.39s 39mb|--------------L1.?---------------| "
- - "L1.?[1686861432074122443,1686862046243243214] 1686932677.39s 60mb |------------------------L1.?------------------------| "
- - "**** Simulation run 331, type=split(HighL0OverlapTotalBacklog)(split_times=[1686861432074122442]). 1 Input Files, 25mb total:"
- - "L0, all files 25mb "
- - "L0.1129[1686861407648648631,1686861697918918899] 1686932677.39s|----------------------------------------L0.1129-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 25mb total:"
- - "L0 "
- - "L0.?[1686861407648648631,1686861432074122442] 1686932677.39s 2mb|L0.?-| "
- - "L0.?[1686861432074122443,1686861697918918899] 1686932677.39s 23mb |--------------------------------------L0.?--------------------------------------| "
- - "**** Simulation run 332, type=split(HighL0OverlapTotalBacklog)(split_times=[1686861432074122442]). 1 Input Files, 24mb total:"
- - "L0, all files 24mb "
- - "L0.1173[1686861407648648631,1686861697918918899] 1686935947.46s|----------------------------------------L0.1173-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 24mb total:"
- - "L0 "
- - "L0.?[1686861407648648631,1686861432074122442] 1686935947.46s 2mb|L0.?-| "
- - "L0.?[1686861432074122443,1686861697918918899] 1686935947.46s 22mb |--------------------------------------L0.?--------------------------------------| "
- - "**** Simulation run 333, type=split(HighL0OverlapTotalBacklog)(split_times=[1686861432074122442]). 1 Input Files, 962kb total:"
- - "L0, all files 962kb "
- - "L0.1214[1686861407648648631,1686861697918918899] 1686936871.55s|----------------------------------------L0.1214-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 962kb total:"
- - "L0 "
- - "L0.?[1686861407648648631,1686861432074122442] 1686936871.55s 81kb|L0.?-| "
- - "L0.?[1686861432074122443,1686861697918918899] 1686936871.55s 881kb |--------------------------------------L0.?--------------------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 12 files: L0.1129, L0.1170, L0.1173, L0.1211, L0.1214, L0.1292, L0.1296, L0.1299, L0.1301, L1.1476, L1.1477, L1.1478"
- - " Creating 25 files"
- - "**** Simulation run 334, type=split(ReduceOverlap)(split_times=[1686860002800686531]). 1 Input Files, 17mb total:"
- - "L0, all files 17mb "
- - "L0.1295[1686859988431489231,1686860188513513488] 1686935947.46s|----------------------------------------L0.1295-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 17mb total:"
- - "L0 "
- - "L0.?[1686859988431489231,1686860002800686531] 1686935947.46s 1mb|L0.?| "
- - "L0.?[1686860002800686532,1686860188513513488] 1686935947.46s 16mb |--------------------------------------L0.?---------------------------------------| "
- - "**** Simulation run 335, type=split(ReduceOverlap)(split_times=[1686861030203733704]). 1 Input Files, 25mb total:"
- - "L0, all files 25mb "
- - "L0.1493[1686860817905001672,1686861117378378351] 1686935947.46s|----------------------------------------L0.1493-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 25mb total:"
- - "L0 "
- - "L0.?[1686860817905001672,1686861030203733704] 1686935947.46s 18mb|----------------------------L0.?-----------------------------| "
- - "L0.?[1686861030203733705,1686861117378378351] 1686935947.46s 7mb |----------L0.?----------| "
- - "**** Simulation run 336, type=split(ReduceOverlap)(split_times=[1686860002800686531]). 1 Input Files, 769kb total:"
- - "L0, all files 769kb "
- - "L0.1291[1686859956297297280,1686860188513513488] 1686936871.55s|----------------------------------------L0.1291----------------------------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 769kb total:"
- - "L0 "
- - "L0.?[1686859956297297280,1686860002800686531] 1686936871.55s 154kb|------L0.?------| "
- - "L0.?[1686860002800686532,1686860188513513488] 1686936871.55s 615kb |--------------------------------L0.?---------------------------------| "
- - "**** Simulation run 337, type=split(ReduceOverlap)(split_times=[1686861030203733704]). 1 Input Files, 992kb total:"
- - "L0, all files 992kb "
- - "L0.1495[1686860817905001672,1686861117378378351] 1686936871.55s|----------------------------------------L0.1495-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 992kb total:"
- - "L0 "
- - "L0.?[1686860817905001672,1686861030203733704] 1686936871.55s 703kb|----------------------------L0.?-----------------------------| "
- - "L0.?[1686861030203733705,1686861117378378351] 1686936871.55s 289kb |----------L0.?----------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L0.1291, L0.1295, L0.1493, L0.1495"
- - " Creating 8 files"
- - "**** Simulation run 338, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686862070968521403, 1686863111733309101]). 12 Input Files, 276mb total:"
- - "L0 "
- - "L0.1498[1686861407648648631,1686861432074122442] 1686932677.39s 2mb |L0.1498| "
- - "L0.1499[1686861432074122443,1686861697918918899] 1686932677.39s 23mb |L0.1499| "
- - "L0.1074[1686861697918918900,1686861711611133258] 1686932677.39s 1mb |L0.1074| "
- - "L0.1305[1686861711611133259,1686862046243243214] 1686932677.39s 28mb |L0.1305-| "
- - "L0.1306[1686862046243243215,1686862278459459440] 1686932677.39s 20mb |L0.1306| "
- - "L0.1081[1686862278459459441,1686862858999999980] 1686932677.39s 49mb |----L0.1081-----| "
- - "L0.1309[1686862858999999981,1686862975108108077] 1686932677.39s 10mb |L0.1309| "
- - "L0.1310[1686862975108108078,1686863149270270250] 1686932677.39s 15mb |L0.1310| "
- - "L1 "
- - "L1.1496[1686861030203733705,1686861432074122442] 1686932677.39s 39mb|-L1.1496--| "
- - "L1.1497[1686861432074122443,1686862046243243214] 1686932677.39s 60mb |-----L1.1497-----| "
- - "L1.1278[1686862046243243215,1686862975108108077] 1686931893.7s 14mb |----------L1.1278----------| "
- - "L1.1279[1686862975108108078,1686863903972972940] 1686931893.7s 14mb |----------L1.1279----------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 276mb total:"
- - "L1 "
- - "L1.?[1686861030203733705,1686862070968521403] 1686932677.39s 100mb|-------------L1.?-------------| "
- - "L1.?[1686862070968521404,1686863111733309101] 1686932677.39s 100mb |-------------L1.?-------------| "
- - "L1.?[1686863111733309102,1686863903972972940] 1686932677.39s 76mb |---------L1.?---------| "
- - "Committing partition 1:"
- - " Soft Deleting 12 files: L0.1074, L0.1081, L1.1278, L1.1279, L0.1305, L0.1306, L0.1309, L0.1310, L1.1496, L1.1497, L0.1498, L0.1499"
- - " Creating 3 files"
- - "**** Simulation run 339, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686842547242379893, 1686843715484759786]). 18 Input Files, 298mb total:"
- - "L0 "
- - "L0.1149[1686841379000000000,1686842249810810810] 1686935947.46s 72mb|------L0.1149-------| "
- - "L0.1342[1686842249810810811,1686842540081081080] 1686935947.46s 24mb |L0.1342| "
- - "L0.1352[1686842540081081081,1686842571022320444] 1686935947.46s 3mb |L0.1352| "
- - "L0.1353[1686842571022320445,1686842593136179151] 1686935947.46s 2mb |L0.1353| "
- - "L0.939[1686842593136179152,1686843120621621620] 1686935947.46s 43mb |--L0.939---| "
- - "L0.1348[1686843120621621621,1686843701162162160] 1686935947.46s 48mb |--L0.1348---| "
- - "L0.1354[1686843701162162161,1686843763044640888] 1686935947.46s 5mb |L0.1354| "
- - "L0.1355[1686843763044640889,1686843991432432430] 1686935947.46s 19mb |L0.1355| "
- - "L0.1152[1686843991432432431,1686844334757800771] 1686935947.46s 28mb |L0.1152| "
- - "L0.944[1686844334757800772,1686844862243243240] 1686935947.46s 43mb |--L0.944---| "
- - "L0.1184[1686841379000000000,1686842249810810810] 1686936871.55s 3mb|------L0.1184-------| "
- - "L0.1344[1686842249810810811,1686842540081081080] 1686936871.55s 962kb |L0.1344| "
- - "L0.1356[1686842540081081081,1686842571022320444] 1686936871.55s 103kb |L0.1356| "
- - "L0.1357[1686842571022320445,1686843120621621620] 1686936871.55s 2mb |--L0.1357---| "
- - "L0.1350[1686843120621621621,1686843701162162160] 1686936871.55s 2mb |--L0.1350---| "
- - "L0.1358[1686843701162162161,1686843763044640888] 1686936871.55s 205kb |L0.1358| "
- - "L0.1359[1686843763044640889,1686843991432432430] 1686936871.55s 757kb |L0.1359| "
- - "L0.1187[1686843991432432431,1686844862243243240] 1686936871.55s 3mb |------L0.1187-------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 298mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842547242379893] 1686936871.55s 100mb|------------L0.?------------| "
- - "L0.?[1686842547242379894,1686843715484759786] 1686936871.55s 100mb |------------L0.?------------| "
- - "L0.?[1686843715484759787,1686844862243243240] 1686936871.55s 98mb |-----------L0.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 18 files: L0.939, L0.944, L0.1149, L0.1152, L0.1184, L0.1187, L0.1342, L0.1344, L0.1348, L0.1350, L0.1352, L0.1353, L0.1354, L0.1355, L0.1356, L0.1357, L0.1358, L0.1359"
- - " Creating 3 files"
- - "**** Simulation run 340, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686846030485623133, 1686847198728003025]). 20 Input Files, 298mb total:"
- - "L0 "
- - "L0.1153[1686844862243243241,1686845579000000000] 1686935947.46s 59mb|----L0.1153-----| "
- - "L0.1154[1686845579000000001,1686845733054054050] 1686935947.46s 13mb |L0.1154| "
- - "L0.1365[1686845733054054051,1686846023324324320] 1686935947.46s 24mb |L0.1365| "
- - "L0.1375[1686846023324324321,1686846054770701976] 1686935947.46s 3mb |L0.1375| "
- - "L0.1376[1686846054770701977,1686846076379422391] 1686935947.46s 2mb |L0.1376| "
- - "L0.949[1686846076379422392,1686846603864864860] 1686935947.46s 43mb |--L0.949---| "
- - "L0.1371[1686846603864864861,1686847184405405399] 1686935947.46s 48mb |--L0.1371---| "
- - "L0.1377[1686847184405405400,1686847247298160711] 1686935947.46s 5mb |L0.1377| "
- - "L0.1378[1686847247298160712,1686847474675675670] 1686935947.46s 19mb |L0.1378| "
- - "L0.1157[1686847474675675671,1686847818001044011] 1686935947.46s 28mb |L0.1157| "
- - "L0.953[1686847818001044012,1686848345486486480] 1686935947.46s 43mb |--L0.953---| "
- - "L0.1188[1686844862243243241,1686845579000000000] 1686936871.55s 2mb|----L0.1188-----| "
- - "L0.1189[1686845579000000001,1686845733054054050] 1686936871.55s 510kb |L0.1189| "
- - "L0.1367[1686845733054054051,1686846023324324320] 1686936871.55s 962kb |L0.1367| "
- - "L0.1379[1686846023324324321,1686846054770701976] 1686936871.55s 104kb |L0.1379| "
- - "L0.1380[1686846054770701977,1686846603864864860] 1686936871.55s 2mb |--L0.1380---| "
- - "L0.1373[1686846603864864861,1686847184405405399] 1686936871.55s 2mb |--L0.1373---| "
- - "L0.1381[1686847184405405400,1686847247298160711] 1686936871.55s 208kb |L0.1381| "
- - "L0.1382[1686847247298160712,1686847474675675670] 1686936871.55s 753kb |L0.1382| "
- - "L0.1192[1686847474675675671,1686848345486486480] 1686936871.55s 3mb |------L0.1192-------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 298mb total:"
- - "L0 "
- - "L0.?[1686844862243243241,1686846030485623133] 1686936871.55s 100mb|------------L0.?------------| "
- - "L0.?[1686846030485623134,1686847198728003025] 1686936871.55s 100mb |------------L0.?------------| "
- - "L0.?[1686847198728003026,1686848345486486480] 1686936871.55s 98mb |-----------L0.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.949, L0.953, L0.1153, L0.1154, L0.1157, L0.1188, L0.1189, L0.1192, L0.1365, L0.1367, L0.1371, L0.1373, L0.1375, L0.1376, L0.1377, L0.1378, L0.1379, L0.1380, L0.1381, L0.1382"
- - " Creating 3 files"
- - "**** Simulation run 341, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686849491497710570, 1686850637508934659]). 19 Input Files, 228mb total:"
- - "L0 "
- - "L0.1158[1686848345486486481,1686849216297297290] 1686935947.46s 71mb|----------L0.1158----------| "
- - "L0.1388[1686849216297297291,1686849506567567560] 1686935947.46s 24mb |L0.1388| "
- - "L0.1398[1686849506567567561,1686849559289331160] 1686935947.46s 4mb |L0.1398| "
- - "L0.1399[1686849559289331161,1686849568759166090] 1686935947.46s 793kb |L0.1399| "
- - "L0.1160[1686849568759166091,1686849779000000000] 1686935947.46s 17mb |L0.1160| "
- - "L0.1161[1686849779000000001,1686850087108108100] 1686935947.46s 25mb |L0.1161-| "
- - "L0.1162[1686850087108108101,1686850559000000000] 1686935947.46s 39mb |---L0.1162----| "
- - "L0.1394[1686850559000000001,1686850667648648639] 1686935947.46s 9mb |L0.1394| "
- - "L0.1400[1686850667648648640,1686850773092175839] 1686935947.46s 9mb |L0.1400| "
- - "L0.1401[1686850773092175840,1686850957918918910] 1686935947.46s 15mb |L0.1401|"
- - "L0.1193[1686848345486486481,1686849216297297290] 1686936871.55s 6mb|----------L0.1193----------| "
- - "L0.1390[1686849216297297291,1686849506567567560] 1686936871.55s 2mb |L0.1390| "
- - "L0.1402[1686849506567567561,1686849559289331160] 1686936871.55s 342kb |L0.1402| "
- - "L0.1403[1686849559289331161,1686849779000000000] 1686936871.55s 1mb |L0.1403| "
- - "L0.1195[1686849779000000001,1686850087108108100] 1686936871.55s 2mb |L0.1195-| "
- - "L0.1196[1686850087108108101,1686850559000000000] 1686936871.55s 2mb |---L0.1196----| "
- - "L0.1396[1686850559000000001,1686850667648648639] 1686936871.55s 360kb |L0.1396| "
- - "L0.1404[1686850667648648640,1686850773092175839] 1686936871.55s 349kb |L0.1404| "
- - "L0.1405[1686850773092175840,1686850957918918910] 1686936871.55s 612kb |L0.1405|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 228mb total:"
- - "L0 "
- - "L0.?[1686848345486486481,1686849491497710570] 1686936871.55s 100mb|----------------L0.?-----------------| "
- - "L0.?[1686849491497710571,1686850637508934659] 1686936871.55s 100mb |----------------L0.?-----------------| "
- - "L0.?[1686850637508934660,1686850957918918910] 1686936871.55s 28mb |--L0.?---| "
- - "Committing partition 1:"
- - " Soft Deleting 19 files: L0.1158, L0.1160, L0.1161, L0.1162, L0.1193, L0.1195, L0.1196, L0.1388, L0.1390, L0.1394, L0.1396, L0.1398, L0.1399, L0.1400, L0.1401, L0.1402, L0.1403, L0.1404, L0.1405"
- - " Creating 3 files"
- - "**** Simulation run 342, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686851943085513215, 1686852928252107519]). 15 Input Files, 277mb total:"
- - "L0 "
- - "L0.1140[1686851828729729721,1686852699540540530] 1686934966.48s 62mb |---------L0.1140----------| "
- - "L0.1237[1686852699540540531,1686852757594594584] 1686934966.48s 4mb |L0.1237| "
- - "L0.1238[1686852757594594585,1686853236700974398] 1686934966.48s 34mb |---L0.1238---| "
- - "L0.1413[1686853236700974399,1686853500686486475] 1686934966.48s 19mb |L0.1413| "
- - "L0.1414[1686853500686486476,1686853570351351340] 1686934966.48s 5mb |L0.1414|"
- - "L0.1243[1686853570351351341,1686853686459459447] 1686935546.05s 8mb |L0.1243|"
- - "L0.1164[1686850957918918911,1686851301244287251] 1686935947.46s 28mb|-L0.1164-| "
- - "L0.963[1686851301244287252,1686851828729729720] 1686935947.46s 43mb |----L0.963-----| "
- - "L0.1198[1686850957918918911,1686851828729729720] 1686936871.55s 3mb|---------L0.1198----------| "
- - "L0.1199[1686851828729729721,1686852699540540530] 1686936871.55s 34mb |---------L0.1199----------| "
- - "L0.1239[1686852699540540531,1686852757594594584] 1686936871.55s 2mb |L0.1239| "
- - "L0.1240[1686852757594594585,1686853222027027016] 1686936871.55s 18mb |---L0.1240---| "
- - "L0.1411[1686853222027027017,1686853500686486475] 1686936871.55s 11mb |L0.1411| "
- - "L0.1412[1686853500686486476,1686853570351351340] 1686936871.55s 3mb |L0.1412|"
- - "L0.1245[1686853570351351341,1686853686459459447] 1686936871.55s 2mb |L0.1245|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 277mb total:"
- - "L0 "
- - "L0.?[1686850957918918911,1686851943085513215] 1686936871.55s 100mb|-------------L0.?-------------| "
- - "L0.?[1686851943085513216,1686852928252107519] 1686936871.55s 100mb |-------------L0.?-------------| "
- - "L0.?[1686852928252107520,1686853686459459447] 1686936871.55s 77mb |---------L0.?----------| "
- - "Committing partition 1:"
- - " Soft Deleting 15 files: L0.963, L0.1140, L0.1164, L0.1198, L0.1199, L0.1237, L0.1238, L0.1239, L0.1240, L0.1243, L0.1245, L0.1411, L0.1412, L0.1413, L0.1414"
- - " Creating 3 files"
- - "**** Simulation run 343, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686854918102788682, 1686856149746117916]). 20 Input Files, 226mb total:"
- - "L0 "
- - "L0.1432[1686853686459459448,1686854034336087198] 1686935546.05s 23mb|-L0.1432-| "
- - "L0.1433[1686854034336087199,1686854243778378365] 1686935546.05s 14mb |L0.1433| "
- - "L0.1416[1686854243778378366,1686854441162162150] 1686935546.05s 13mb |L0.1416| "
- - "L0.1249[1686854441162162151,1686854615324324310] 1686935546.05s 12mb |L0.1249| "
- - "L0.1250[1686854615324324311,1686854819000000000] 1686935546.05s 14mb |L0.1250| "
- - "L0.1146[1686854819000000001,1686854963648648636] 1686935546.05s 10mb |L0.1146| "
- - "L0.1422[1686854963648648637,1686854986870270255] 1686935546.05s 2mb |L0.1422| "
- - "L0.1434[1686854986870270256,1686855311077579811] 1686935546.05s 22mb |L0.1434-| "
- - "L0.1435[1686855311077579812,1686855311972972960] 1686935546.05s 62kb |L0.1435| "
- - "L0.1255[1686855311972972961,1686855544189189173] 1686935742.51s 19mb |L0.1255| "
- - "L0.1428[1686855544189189174,1686855729962162145] 1686935742.51s 15mb |L0.1428| "
- - "L0.1429[1686855729962162146,1686855892513513500] 1686935742.51s 13mb |L0.1429| "
- - "L0.1147[1686855892513513501,1686856182783783770] 1686935742.51s 23mb |L0.1147| "
- - "L0.1261[1686856182783783771,1686856473054054036] 1686935742.51s 23mb |L0.1261| "
- - "L0.1262[1686856473054054037,1686856473054054039] 1686935742.51s 1b |L0.1262|"
- - "L0.1436[1686853686459459448,1686854034336087198] 1686936871.55s 7mb|-L0.1436-| "
- - "L0.1437[1686854034336087199,1686854243778378365] 1686936871.55s 4mb |L0.1437| "
- - "L0.1418[1686854243778378366,1686854441162162150] 1686936871.55s 4mb |L0.1418| "
- - "L0.1251[1686854441162162151,1686854615324324310] 1686936871.55s 4mb |L0.1251| "
- - "L0.1252[1686854615324324311,1686854819000000000] 1686936871.55s 4mb |L0.1252| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 226mb total:"
- - "L0 "
- - "L0.?[1686853686459459448,1686854918102788682] 1686936871.55s 100mb|----------------L0.?-----------------| "
- - "L0.?[1686854918102788683,1686856149746117916] 1686936871.55s 100mb |----------------L0.?-----------------| "
- - "L0.?[1686856149746117917,1686856473054054039] 1686936871.55s 26mb |--L0.?--| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.1146, L0.1147, L0.1249, L0.1250, L0.1251, L0.1252, L0.1255, L0.1261, L0.1262, L0.1416, L0.1418, L0.1422, L0.1428, L0.1429, L0.1432, L0.1433, L0.1434, L0.1435, L0.1436, L0.1437"
- - " Creating 3 files"
- - "**** Simulation run 344, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686856142243243231]). 10 Input Files, 22mb total:"
- - "L0 "
- - "L0.1203[1686854819000000001,1686854963648648636] 1686936871.55s 3mb|L0.1203| "
- - "L0.1424[1686854963648648637,1686854986870270255] 1686936871.55s 499kb |L0.1424| "
- - "L0.1438[1686854986870270256,1686855311077579811] 1686936871.55s 7mb |----L0.1438----| "
- - "L0.1439[1686855311077579812,1686855311972972960] 1686936871.55s 19kb |L0.1439| "
- - "L0.1257[1686855311972972961,1686855544189189173] 1686936871.55s 2mb |-L0.1257--| "
- - "L0.1430[1686855544189189174,1686855729962162145] 1686936871.55s 2mb |L0.1430-| "
- - "L0.1431[1686855729962162146,1686855892513513500] 1686936871.55s 2mb |L0.1431| "
- - "L0.1204[1686855892513513501,1686856182783783770] 1686936871.55s 3mb |---L0.1204---| "
- - "L0.1263[1686856182783783771,1686856473054054036] 1686936871.55s 3mb |---L0.1263---| "
- - "L0.1264[1686856473054054037,1686856473054054039] 1686936871.55s 1b |L0.1264|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 22mb total:"
- - "L0 "
- - "L0.?[1686854819000000001,1686856142243243231] 1686936871.55s 17mb|--------------------------------L0.?---------------------------------| "
- - "L0.?[1686856142243243232,1686856473054054039] 1686936871.55s 4mb |-----L0.?------| "
- - "Committing partition 1:"
- - " Soft Deleting 10 files: L0.1203, L0.1204, L0.1257, L0.1263, L0.1264, L0.1424, L0.1430, L0.1431, L0.1438, L0.1439"
- - " Creating 2 files"
- - "**** Simulation run 345, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686857679795016021, 1686858886535978002]). 20 Input Files, 251mb total:"
- - "L0 "
- - "L0.1038[1686856473054054040,1686856564304278603] 1686935742.51s 7mb|L0.1038| "
- - "L0.981[1686856564304278604,1686857053594594580] 1686935742.51s 39mb |---L0.981---| "
- - "L0.1445[1686857053594594581,1686857216145945927] 1686935947.46s 14mb |L0.1445| "
- - "L0.1446[1686857216145945928,1686857401918918899] 1686935947.46s 16mb |L0.1446| "
- - "L0.1268[1686857401918918900,1686857634135135120] 1686935947.46s 19mb |L0.1268| "
- - "L0.1468[1686857634135135121,1686857724225846697] 1686935947.46s 8mb |L0.1468| "
- - "L0.1469[1686857724225846698,1686857924405405390] 1686935947.46s 17mb |L0.1469| "
- - "L0.1452[1686857924405405391,1686857959237837817] 1686935947.46s 3mb |L0.1452| "
- - "L0.1453[1686857959237837818,1686858214675675659] 1686935947.46s 21mb |L0.1453| "
- - "L0.1333[1686858214675675660,1686858240168622590] 1686935947.46s 2mb |L0.1333| "
- - "L0.1334[1686858240168622591,1686858251288003855] 1686935947.46s 951kb |L0.1334| "
- - "L0.1281[1686858251288003856,1686858330783783762] 1686935947.46s 7mb |L0.1281| "
- - "L0.1466[1686858330783783763,1686858702329729707] 1686935947.46s 31mb |-L0.1466-| "
- - "L0.1467[1686858702329729708,1686858795216216200] 1686935947.46s 8mb |L0.1467| "
- - "L0.1470[1686858795216216201,1686858975397639357] 1686935947.46s 15mb |L0.1470| "
- - "L0.1471[1686858975397639358,1686859019000000000] 1686935947.46s 4mb |L0.1471| "
- - "L0.1287[1686859019000000001,1686859259648648625] 1686935947.46s 20mb |L0.1287| "
- - "L0.1288[1686859259648648626,1686859375756756740] 1686935947.46s 10mb |L0.1288|"
- - "L0.1460[1686859375756756741,1686859445421621597] 1686935947.46s 6mb |L0.1460|"
- - "L0.1461[1686859445421621598,1686859499000000000] 1686935947.46s 4mb |L0.1461|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 251mb total:"
- - "L0 "
- - "L0.?[1686856473054054040,1686857679795016021] 1686935947.46s 100mb|--------------L0.?---------------| "
- - "L0.?[1686857679795016022,1686858886535978002] 1686935947.46s 100mb |--------------L0.?---------------| "
- - "L0.?[1686858886535978003,1686859499000000000] 1686935947.46s 51mb |------L0.?------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.981, L0.1038, L0.1268, L0.1281, L0.1287, L0.1288, L0.1333, L0.1334, L0.1445, L0.1446, L0.1452, L0.1453, L0.1460, L0.1461, L0.1466, L0.1467, L0.1468, L0.1469, L0.1470, L0.1471"
- - " Creating 3 files"
- - "**** Simulation run 346, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686859027432432416]). 20 Input Files, 33mb total:"
- - "L0 "
- - "L0.1481[1686859499000000001,1686859589566760129] 1686935947.46s 8mb |L0.1481|"
- - "L0.1482[1686859589566760130,1686859666027027010] 1686935947.46s 6mb |L0.1482|"
- - "L0.1041[1686856473054054040,1686857053594594580] 1686936871.55s 6mb|---L0.1041----| "
- - "L0.1447[1686857053594594581,1686857216145945927] 1686936871.55s 1mb |L0.1447| "
- - "L0.1448[1686857216145945928,1686857401918918899] 1686936871.55s 1mb |L0.1448| "
- - "L0.1270[1686857401918918900,1686857634135135120] 1686936871.55s 2mb |L0.1270| "
- - "L0.1472[1686857634135135121,1686857724225846697] 1686936871.55s 607kb |L0.1472| "
- - "L0.1473[1686857724225846698,1686857924405405390] 1686936871.55s 1mb |L0.1473| "
- - "L0.1464[1686857924405405391,1686857959237837817] 1686936871.55s 235kb |L0.1464| "
- - "L0.1465[1686857959237837818,1686858214675675659] 1686936871.55s 2mb |L0.1465| "
- - "L0.1335[1686858214675675660,1686858240168622590] 1686936871.55s 172kb |L0.1335| "
- - "L0.1336[1686858240168622591,1686858330783783762] 1686936871.55s 611kb |L0.1336| "
- - "L0.1454[1686858330783783763,1686858702329729707] 1686936871.55s 2mb |L0.1454-| "
- - "L0.1455[1686858702329729708,1686858795216216200] 1686936871.55s 626kb |L0.1455| "
- - "L0.1474[1686858795216216201,1686858975397639357] 1686936871.55s 597kb |L0.1474| "
- - "L0.1475[1686858975397639358,1686859019000000000] 1686936871.55s 144kb |L0.1475| "
- - "L0.1289[1686859019000000001,1686859259648648625] 1686936871.55s 797kb |L0.1289| "
- - "L0.1290[1686859259648648626,1686859375756756740] 1686936871.55s 385kb |L0.1290| "
- - "L0.1462[1686859375756756741,1686859445421621597] 1686936871.55s 231kb |L0.1462|"
- - "L0.1463[1686859445421621598,1686859499000000000] 1686936871.55s 178kb |L0.1463|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 33mb total:"
- - "L0 "
- - "L0.?[1686856473054054040,1686859027432432416] 1686936871.55s 27mb|---------------------------------L0.?---------------------------------| "
- - "L0.?[1686859027432432417,1686859666027027010] 1686936871.55s 7mb |-----L0.?------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.1041, L0.1270, L0.1289, L0.1290, L0.1335, L0.1336, L0.1447, L0.1448, L0.1454, L0.1455, L0.1462, L0.1463, L0.1464, L0.1465, L0.1472, L0.1473, L0.1474, L0.1475, L0.1481, L0.1482"
- - " Creating 2 files"
- - "**** Simulation run 347, type=compact(ManySmallFiles). 2 Input Files, 553kb total:"
- - "L0 "
- - "L0.1483[1686859499000000001,1686859589566760129] 1686936871.55s 300kb|-------------------L0.1483--------------------| "
- - "L0.1484[1686859589566760130,1686859666027027010] 1686936871.55s 253kb |----------------L0.1484----------------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 553kb total:"
- - "L0, all files 553kb "
- - "L0.?[1686859499000000001,1686859666027027010] 1686936871.55s|------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L0.1483, L0.1484"
- - " Creating 1 files"
- - "**** Simulation run 348, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686860866085039343, 1686862066143051675]). 20 Input Files, 276mb total:"
- - "L0 "
- - "L0.1171[1686859666027027011,1686859956297297279] 1686935947.46s 24mb|L0.1171| "
- - "L0.1062[1686859956297297280,1686859988431489230] 1686935947.46s 3mb |L0.1062| "
- - "L0.1504[1686859988431489231,1686860002800686531] 1686935947.46s 1mb |L0.1504| "
- - "L0.1505[1686860002800686532,1686860188513513488] 1686935947.46s 16mb |L0.1505| "
- - "L0.1488[1686860188513513489,1686860203735880900] 1686935947.46s 1mb |L0.1488| "
- - "L0.1489[1686860203735880901,1686860536837837820] 1686935947.46s 28mb |L0.1489| "
- - "L0.1492[1686860536837837821,1686860817905001671] 1686935947.46s 24mb |L0.1492| "
- - "L0.1506[1686860817905001672,1686861030203733704] 1686935947.46s 18mb |L0.1506| "
- - "L0.1507[1686861030203733705,1686861117378378351] 1686935947.46s 7mb |L0.1507| "
- - "L0.1300[1686861117378378352,1686861117378378360] 1686935947.46s 1b |L0.1300| "
- - "L0.1172[1686861117378378361,1686861407648648630] 1686935947.46s 24mb |L0.1172| "
- - "L0.1500[1686861407648648631,1686861432074122442] 1686935947.46s 2mb |L0.1500| "
- - "L0.1501[1686861432074122443,1686861697918918899] 1686935947.46s 22mb |L0.1501| "
- - "L0.1077[1686861697918918900,1686861730053110850] 1686935947.46s 3mb |L0.1077| "
- - "L0.1307[1686861730053110851,1686862046243243214] 1686935947.46s 26mb |L0.1307| "
- - "L0.1308[1686862046243243215,1686862278459459440] 1686935947.46s 19mb |L0.1308| "
- - "L0.1084[1686862278459459441,1686862858999999980] 1686935947.46s 46mb |---L0.1084---| "
- - "L0.1311[1686862858999999981,1686862975108108077] 1686935947.46s 9mb |L0.1311|"
- - "L0.1212[1686859666027027011,1686859956297297279] 1686936871.55s 962kb|L0.1212| "
- - "L0.1508[1686859956297297280,1686860002800686531] 1686936871.55s 154kb |L0.1508| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 276mb total:"
- - "L0 "
- - "L0.?[1686859666027027011,1686860866085039343] 1686936871.55s 100mb|-------------L0.?-------------| "
- - "L0.?[1686860866085039344,1686862066143051675] 1686936871.55s 100mb |-------------L0.?-------------| "
- - "L0.?[1686862066143051676,1686862975108108077] 1686936871.55s 76mb |---------L0.?---------| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.1062, L0.1077, L0.1084, L0.1171, L0.1172, L0.1212, L0.1300, L0.1307, L0.1308, L0.1311, L0.1488, L0.1489, L0.1492, L0.1500, L0.1501, L0.1504, L0.1505, L0.1506, L0.1507, L0.1508"
- - " Creating 3 files"
- - "**** Simulation run 349, type=compact(ManySmallFiles). 14 Input Files, 12mb total:"
- - "L0 "
- - "L0.1509[1686860002800686532,1686860188513513488] 1686936871.55s 615kb|L0.1509| "
- - "L0.1490[1686860188513513489,1686860203735880900] 1686936871.55s 50kb |L0.1490| "
- - "L0.1491[1686860203735880901,1686860536837837820] 1686936871.55s 1mb |L0.1491-| "
- - "L0.1494[1686860536837837821,1686860817905001671] 1686936871.55s 931kb |L0.1494| "
- - "L0.1510[1686860817905001672,1686861030203733704] 1686936871.55s 703kb |L0.1510| "
- - "L0.1511[1686861030203733705,1686861117378378351] 1686936871.55s 289kb |L0.1511| "
- - "L0.1302[1686861117378378352,1686861117378378360] 1686936871.55s 1b |L0.1302| "
- - "L0.1213[1686861117378378361,1686861407648648630] 1686936871.55s 962kb |L0.1213| "
- - "L0.1502[1686861407648648631,1686861432074122442] 1686936871.55s 81kb |L0.1502| "
- - "L0.1503[1686861432074122443,1686861697918918899] 1686936871.55s 881kb |L0.1503| "
- - "L0.1303[1686861697918918900,1686862046243243214] 1686936871.55s 1mb |L0.1303-| "
- - "L0.1304[1686862046243243215,1686862278459459440] 1686936871.55s 769kb |L0.1304| "
- - "L0.1087[1686862278459459441,1686862858999999980] 1686936871.55s 4mb |----L0.1087----| "
- - "L0.1313[1686862858999999981,1686862975108108077] 1686936871.55s 782kb |L0.1313|"
- - "**** 1 Output Files (parquet_file_id not yet assigned), 12mb total:"
- - "L0, all files 12mb "
- - "L0.?[1686860002800686532,1686862975108108077] 1686936871.55s|------------------------------------------L0.?------------------------------------------|"
- - "Committing partition 1:"
- - " Soft Deleting 14 files: L0.1087, L0.1213, L0.1302, L0.1303, L0.1304, L0.1313, L0.1490, L0.1491, L0.1494, L0.1502, L0.1503, L0.1509, L0.1510, L0.1511"
- - " Creating 1 files"
- - "**** Simulation run 350, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686863763854983772, 1686864552601859466]). 20 Input Files, 243mb total:"
- - "L0 "
- - "L0.1131[1686863149270270251,1686863439540540519] 1686932677.39s 25mb |--L0.1131--| "
- - "L0.1083[1686863439540540520,1686863453232754878] 1686932677.39s 1mb |L0.1083| "
- - "L0.1132[1686863453232754879,1686863699000000000] 1686932677.39s 21mb |-L0.1132-| "
- - "L0.1315[1686863699000000001,1686863903972972940] 1686932677.39s 17mb |L0.1315| "
- - "L0.1316[1686863903972972941,1686864020081081060] 1686932677.39s 10mb |L0.1316| "
- - "L0.1329[1686864020081081061,1686864651607515459] 1686934966.48s 42mb |----------L0.1329----------| "
- - "L0.1330[1686864651607515460,1686864832837837803] 1686934966.48s 12mb |L0.1330| "
- - "L0.1322[1686864832837837804,1686864890891891870] 1686934966.48s 4mb |L0.1322|"
- - "L0.1312[1686862975108108078,1686863149270270250] 1686935947.46s 14mb|L0.1312| "
- - "L0.1175[1686863149270270251,1686863439540540519] 1686935947.46s 23mb |--L0.1175--| "
- - "L0.1086[1686863439540540520,1686863528879205201] 1686935947.46s 7mb |L0.1086| "
- - "L0.1176[1686863528879205202,1686863699000000000] 1686935947.46s 14mb |L0.1176| "
- - "L0.1317[1686863699000000001,1686863903972972940] 1686935947.46s 16mb |L0.1317| "
- - "L0.1318[1686863903972972941,1686864020081081060] 1686935947.46s 9mb |L0.1318| "
- - "L0.1314[1686862975108108078,1686863149270270250] 1686936871.55s 1mb|L0.1314| "
- - "L0.1216[1686863149270270251,1686863439540540519] 1686936871.55s 2mb |--L0.1216--| "
- - "L0.1217[1686863439540540520,1686863699000000000] 1686936871.55s 2mb |-L0.1217--| "
- - "L0.1319[1686863699000000001,1686863903972972940] 1686936871.55s 1mb |L0.1319| "
- - "L0.1320[1686863903972972941,1686864020081081060] 1686936871.55s 782kb |L0.1320| "
- - "L0.1331[1686864020081081061,1686864651607515459] 1686936871.55s 20mb |----------L0.1331----------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 243mb total:"
- - "L0 "
- - "L0.?[1686862975108108078,1686863763854983772] 1686936871.55s 100mb|---------------L0.?----------------| "
- - "L0.?[1686863763854983773,1686864552601859466] 1686936871.55s 100mb |---------------L0.?----------------| "
- - "L0.?[1686864552601859467,1686864890891891870] 1686936871.55s 43mb |----L0.?-----| "
- - "Committing partition 1:"
- - " Soft Deleting 20 files: L0.1083, L0.1086, L0.1131, L0.1132, L0.1175, L0.1176, L0.1216, L0.1217, L0.1312, L0.1314, L0.1315, L0.1316, L0.1317, L0.1318, L0.1319, L0.1320, L0.1322, L0.1329, L0.1330, L0.1331"
- - " Creating 3 files"
- - "**** Simulation run 351, type=compact(ManySmallFiles). 2 Input Files, 8mb total:"
- - "L0 "
- - "L0.1332[1686864651607515460,1686864832837837803] 1686936871.55s 6mb|-----------------------------L0.1332------------------------------| "
- - "L0.1324[1686864832837837804,1686864890891891870] 1686936871.55s 2mb |------L0.1324------| "
- - "**** 1 Output Files (parquet_file_id not yet assigned), 8mb total:"
- - "L0, all files 8mb "
- - "L0.?[1686864651607515460,1686864890891891870] 1686936871.55s|-----------------------------------------L0.?------------------------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L0.1324, L0.1332"
- - " Creating 1 files"
- - "**** Simulation run 352, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686865715561243055, 1686866540230594239]). 7 Input Files, 211mb total:"
- - "L0 "
- - "L0.1134[1686865761702702681,1686866632513513490] 1686932677.39s 60mb |-----------------L0.1134------------------| "
- - "L0.1143[1686864890891891871,1686865413378378356] 1686934966.48s 35mb|--------L0.1143---------| "
- - "L0.1002[1686865413378378357,1686865761702702680] 1686934966.48s 23mb |----L0.1002----| "
- - "L0.1178[1686865761702702681,1686866632513513490] 1686935947.46s 59mb |-----------------L0.1178------------------| "
- - "L0.1220[1686864890891891871,1686865413378378356] 1686936871.55s 16mb|--------L0.1220---------| "
- - "L0.1004[1686865413378378357,1686865761702702680] 1686936871.55s 11mb |----L0.1004----| "
- - "L0.1221[1686865761702702681,1686866632513513490] 1686936871.55s 6mb |-----------------L0.1221------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 211mb total:"
- - "L0 "
- - "L0.?[1686864890891891871,1686865715561243055] 1686936871.55s 100mb|------------------L0.?------------------| "
- - "L0.?[1686865715561243056,1686866540230594239] 1686936871.55s 100mb |------------------L0.?------------------| "
- - "L0.?[1686866540230594240,1686866632513513490] 1686936871.55s 11mb |L0.?|"
- - "Committing partition 1:"
- - " Soft Deleting 7 files: L0.1002, L0.1004, L0.1134, L0.1143, L0.1178, L0.1220, L0.1221"
- - " Creating 3 files"
- - "**** Simulation run 353, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686868931238859876, 1686869543477719751]). 8 Input Files, 293mb total:"
- - "L0 "
- - "L0.1139[1686868319000000001,1686868747944483205] 1686933271.57s 34mb|------L0.1139------| "
- - "L0.1015[1686868747944483206,1686869244945945920] 1686933271.57s 40mb |-------L0.1015--------| "
- - "L0.1019[1686869244945945921,1686869890068506247] 1686935742.51s 100mb |-----------L0.1019------------| "
- - "L0.1020[1686869890068506248,1686870115756756730] 1686935742.51s 35mb |-L0.1020-| "
- - "L0.1183[1686868319000000001,1686868896621621596] 1686935947.46s 38mb|---------L0.1183----------| "
- - "L0.1017[1686868896621621597,1686869244945945920] 1686935947.46s 23mb |----L0.1017----| "
- - "L0.1226[1686868319000000001,1686869244945945920] 1686936871.55s 9mb|------------------L0.1226-------------------| "
- - "L0.1021[1686869244945945921,1686870115756756730] 1686936871.55s 14mb |-----------------L0.1021-----------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 293mb total:"
- - "L0 "
- - "L0.?[1686868319000000001,1686868931238859876] 1686936871.55s 100mb|------------L0.?------------| "
- - "L0.?[1686868931238859877,1686869543477719751] 1686936871.55s 100mb |------------L0.?------------| "
- - "L0.?[1686869543477719752,1686870115756756730] 1686936871.55s 93mb |-----------L0.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 8 files: L0.1015, L0.1017, L0.1019, L0.1020, L0.1021, L0.1139, L0.1183, L0.1226"
- - " Creating 3 files"
- - "**** Simulation run 354, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686870677571828433, 1686871239386900135]). 3 Input Files, 255mb total:"
- - "L0 "
- - "L0.1022[1686870115756756731,1686870676199779489] 1686936871.55s 100mb|-------------L0.1022-------------| "
- - "L0.1023[1686870676199779490,1686870986567567540] 1686936871.55s 55mb |-----L0.1023-----| "
- - "L0.1024[1686870986567567541,1686871550514515284] 1686936871.55s 100mb |-------------L0.1024-------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 255mb total:"
- - "L0 "
- - "L0.?[1686870115756756731,1686870677571828433] 1686936871.55s 100mb|--------------L0.?---------------| "
- - "L0.?[1686870677571828434,1686871239386900135] 1686936871.55s 100mb |--------------L0.?---------------| "
- - "L0.?[1686871239386900136,1686871550514515284] 1686936871.55s 55mb |------L0.?-------| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L0.1022, L0.1023, L0.1024"
- - " Creating 3 files"
- - "**** Simulation run 355, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686872106056369133, 1686872661598222981]). 4 Input Files, 212mb total:"
- - "L0 "
- - "L0.1025[1686871550514515285,1686871857378378350] 1686936871.55s 54mb|-------L0.1025-------| "
- - "L0.1026[1686871619000000000,1686871857378378350] 1686936871.55s 1mb |----L0.1026-----| "
- - "L0.1027[1686871857378378351,1686872413821229216] 1686936871.55s 100mb |----------------L0.1027-----------------| "
- - "L0.1028[1686872413821229217,1686872728189189160] 1686936871.55s 56mb |-------L0.1028--------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 212mb total:"
- - "L0 "
- - "L0.?[1686871550514515285,1686872106056369133] 1686936871.55s 100mb|------------------L0.?------------------| "
- - "L0.?[1686872106056369134,1686872661598222981] 1686936871.55s 100mb |------------------L0.?------------------| "
- - "L0.?[1686872661598222982,1686872728189189160] 1686936871.55s 12mb |L0.?|"
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L0.1025, L0.1026, L0.1027, L0.1028"
- - " Creating 3 files"
- - "**** Simulation run 356, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686873284631629744]). 2 Input Files, 156mb total:"
- - "L0 "
- - "L0.1029[1686872728189189161,1686873284631629744] 1686936871.55s 100mb|------------------------L0.1029------------------------| "
- - "L0.1030[1686873284631629745,1686873599000000000] 1686936871.55s 56mb |-----------L0.1030------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 156mb total:"
- - "L0 "
- - "L0.?[1686872728189189161,1686873284631629744] 1686936871.55s 100mb|-------------------------L0.?--------------------------| "
- - "L0.?[1686873284631629745,1686873599000000000] 1686936871.55s 56mb |-------------L0.?-------------| "
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L0.1029, L0.1030"
- - " Creating 2 files"
- - "**** Simulation run 357, type=split(CompactAndSplitOutput(ManySmallFiles))(split_times=[1686867299592048857, 1686867966670584223]). 14 Input Files, 253mb total:"
- - "L0 "
- - "L0.1135[1686866632513513491,1686867209822490496] 1686932677.39s 40mb|----------L0.1135-----------| "
- - "L0.1006[1686867209822490497,1686867503324324300] 1686932677.39s 20mb |---L0.1006---| "
- - "L0.1136[1686867503324324301,1686867659000000000] 1686933271.57s 13mb |L0.1136| "
- - "L0.1137[1686867659000000001,1686867839000000000] 1686933271.57s 14mb |L0.1137| "
- - "L0.1138[1686867839000000001,1686868319000000000] 1686933271.57s 39mb |--------L0.1138--------| "
- - "L0.1179[1686866632513513491,1686867154999999976] 1686935947.46s 36mb|---------L0.1179---------| "
- - "L0.1012[1686867154999999977,1686867503324324300] 1686935947.46s 24mb |----L0.1012-----| "
- - "L0.1180[1686867503324324301,1686867659000000000] 1686935947.46s 10mb |L0.1180| "
- - "L0.1181[1686867659000000001,1686867839000000000] 1686935947.46s 12mb |L0.1181| "
- - "L0.1182[1686867839000000001,1686868319000000000] 1686935947.46s 32mb |--------L0.1182--------| "
- - "L0.1222[1686866632513513491,1686867503324324300] 1686936871.55s 6mb|------------------L0.1222-------------------| "
- - "L0.1223[1686867503324324301,1686867659000000000] 1686936871.55s 1mb |L0.1223| "
- - "L0.1224[1686867659000000001,1686867839000000000] 1686936871.55s 2mb |L0.1224| "
- - "L0.1225[1686867839000000001,1686868319000000000] 1686936871.55s 5mb |--------L0.1225--------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 253mb total:"
- - "L0 "
- - "L0.?[1686866632513513491,1686867299592048857] 1686936871.55s 100mb|--------------L0.?---------------| "
- - "L0.?[1686867299592048858,1686867966670584223] 1686936871.55s 100mb |--------------L0.?---------------| "
- - "L0.?[1686867966670584224,1686868319000000000] 1686936871.55s 53mb |------L0.?------| "
- - "Committing partition 1:"
- - " Soft Deleting 14 files: L0.1006, L0.1012, L0.1135, L0.1136, L0.1137, L0.1138, L0.1179, L0.1180, L0.1181, L0.1182, L0.1222, L0.1223, L0.1224, L0.1225"
- - " Creating 3 files"
- - "**** Simulation run 358, type=split(HighL0OverlapTotalBacklog)(split_times=[1686842540081081080]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1515[1686841379000000000,1686842547242379893] 1686936871.55s|----------------------------------------L0.1515-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686841379000000000,1686842540081081080] 1686936871.55s 99mb|-----------------------------------------L0.?------------------------------------------| "
- - "L0.?[1686842540081081081,1686842547242379893] 1686936871.55s 628kb |L0.?|"
- - "**** Simulation run 359, type=split(HighL0OverlapTotalBacklog)(split_times=[1686843701162162160]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1516[1686842547242379894,1686843715484759786] 1686936871.55s|----------------------------------------L0.1516-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686842547242379894,1686843701162162160] 1686936871.55s 99mb|-----------------------------------------L0.?-----------------------------------------| "
- - "L0.?[1686843701162162161,1686843715484759786] 1686936871.55s 1mb |L0.?|"
- - "**** Simulation run 360, type=split(HighL0OverlapTotalBacklog)(split_times=[1686846023324324320]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1518[1686844862243243241,1686846030485623133] 1686936871.55s|----------------------------------------L0.1518-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686844862243243241,1686846023324324320] 1686936871.55s 99mb|-----------------------------------------L0.?------------------------------------------| "
- - "L0.?[1686846023324324321,1686846030485623133] 1686936871.55s 628kb |L0.?|"
- - "**** Simulation run 361, type=split(HighL0OverlapTotalBacklog)(split_times=[1686847184405405399]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1519[1686846030485623134,1686847198728003025] 1686936871.55s|----------------------------------------L0.1519-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686846030485623134,1686847184405405399] 1686936871.55s 99mb|-----------------------------------------L0.?-----------------------------------------| "
- - "L0.?[1686847184405405400,1686847198728003025] 1686936871.55s 1mb |L0.?|"
- - "**** Simulation run 362, type=split(HighL0OverlapTotalBacklog)(split_times=[1686849216297297290]). 1 Input Files, 96mb total:"
- - "L1, all files 96mb "
- - "L1.1386[1686848345486486481,1686849506567567560] 1686932677.39s|----------------------------------------L1.1386-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 96mb total:"
- - "L1 "
- - "L1.?[1686848345486486481,1686849216297297290] 1686932677.39s 72mb|------------------------------L1.?-------------------------------| "
- - "L1.?[1686849216297297291,1686849506567567560] 1686932677.39s 24mb |--------L1.?--------| "
- - "**** Simulation run 363, type=split(HighL0OverlapTotalBacklog)(split_times=[1686849216297297290]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1521[1686848345486486481,1686849491497710570] 1686936871.55s|----------------------------------------L0.1521-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686848345486486481,1686849216297297290] 1686936871.55s 76mb|-------------------------------L0.?-------------------------------| "
- - "L0.?[1686849216297297291,1686849491497710570] 1686936871.55s 24mb |-------L0.?--------| "
- - "**** Simulation run 364, type=split(HighL0OverlapTotalBacklog)(split_times=[1686850087108108099]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1522[1686849491497710571,1686850637508934659] 1686936871.55s|----------------------------------------L0.1522-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686849491497710571,1686850087108108099] 1686936871.55s 52mb|--------------------L0.?--------------------| "
- - "L0.?[1686850087108108100,1686850637508934659] 1686936871.55s 48mb |------------------L0.?-------------------| "
- - "**** Simulation run 365, type=split(HighL0OverlapTotalBacklog)(split_times=[1686850087108108099]). 1 Input Files, 91mb total:"
- - "L1, all files 91mb "
- - "L1.1392[1686849559289331161,1686850667648648639] 1686932677.39s|----------------------------------------L1.1392-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 91mb total:"
- - "L1 "
- - "L1.?[1686849559289331161,1686850087108108099] 1686932677.39s 43mb|------------------L1.?------------------| "
- - "L1.?[1686850087108108100,1686850667648648639] 1686932677.39s 48mb |--------------------L1.?---------------------| "
- - "**** Simulation run 366, type=split(HighL0OverlapTotalBacklog)(split_times=[1686850957918918908]). 1 Input Files, 28mb total:"
- - "L0, all files 28mb "
- - "L0.1523[1686850637508934660,1686850957918918910] 1686936871.55s|----------------------------------------L0.1523-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 28mb total:"
- - "L0 "
- - "L0.?[1686850637508934660,1686850957918918908] 1686936871.55s 28mb|-----------------------------------------L0.?------------------------------------------| "
- - "L0.?[1686850957918918909,1686850957918918910] 1686936871.55s 1b |L0.?|"
- - "**** Simulation run 367, type=split(HighL0OverlapTotalBacklog)(split_times=[1686850957918918908, 1686851828729729717]). 1 Input Files, 87mb total:"
- - "L1, all files 87mb "
- - "L1.1385[1686850773092175840,1686851828729729720] 1686932677.39s|----------------------------------------L1.1385-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 87mb total:"
- - "L1 "
- - "L1.?[1686850773092175840,1686850957918918908] 1686932677.39s 15mb|----L1.?-----| "
- - "L1.?[1686850957918918909,1686851828729729717] 1686932677.39s 72mb |----------------------------------L1.?----------------------------------| "
- - "L1.?[1686851828729729718,1686851828729729720] 1686932677.39s 1b |L1.?|"
- - "**** Simulation run 368, type=split(HighL0OverlapTotalBacklog)(split_times=[1686851828729729717]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1524[1686850957918918911,1686851943085513215] 1686936871.55s|----------------------------------------L0.1524-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686850957918918911,1686851828729729717] 1686936871.55s 88mb|------------------------------------L0.?-------------------------------------| "
- - "L0.?[1686851828729729718,1686851943085513215] 1686936871.55s 12mb |--L0.?--| "
- - "**** Simulation run 369, type=split(HighL0OverlapTotalBacklog)(split_times=[1686852699540540526]). 1 Input Files, 14mb total:"
- - "L1, all files 14mb "
- - "L1.1230[1686851828729729721,1686852757594594584] 1686931893.7s|----------------------------------------L1.1230-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 14mb total:"
- - "L1 "
- - "L1.?[1686851828729729721,1686852699540540526] 1686931893.7s 14mb|---------------------------------------L1.?---------------------------------------| "
- - "L1.?[1686852699540540527,1686852757594594584] 1686931893.7s 927kb |L1.?|"
- - "**** Simulation run 370, type=split(HighL0OverlapTotalBacklog)(split_times=[1686852699540540526]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1525[1686851943085513216,1686852928252107519] 1686936871.55s|----------------------------------------L0.1525-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686851943085513216,1686852699540540526] 1686936871.55s 77mb|-------------------------------L0.?--------------------------------| "
- - "L0.?[1686852699540540527,1686852928252107519] 1686936871.55s 23mb |-------L0.?-------| "
- - "**** Simulation run 371, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853570351351335]). 1 Input Files, 77mb total:"
- - "L0, all files 77mb "
- - "L0.1526[1686852928252107520,1686853686459459447] 1686936871.55s|----------------------------------------L0.1526-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 77mb total:"
- - "L0 "
- - "L0.?[1686852928252107520,1686853570351351335] 1686936871.55s 65mb|-----------------------------------L0.?-----------------------------------| "
- - "L0.?[1686853570351351336,1686853686459459447] 1686936871.55s 12mb |---L0.?----| "
- - "**** Simulation run 372, type=split(HighL0OverlapTotalBacklog)(split_times=[1686853570351351335]). 1 Input Files, 42mb total:"
- - "L1, all files 42mb "
- - "L1.1410[1686853500686486476,1686854034336087198] 1686932677.39s|----------------------------------------L1.1410-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 42mb total:"
- - "L1 "
- - "L1.?[1686853500686486476,1686853570351351335] 1686932677.39s 5mb|--L1.?---| "
- - "L1.?[1686853570351351336,1686854034336087198] 1686932677.39s 36mb |------------------------------------L1.?------------------------------------| "
- - "**** Simulation run 373, type=split(HighL0OverlapTotalBacklog)(split_times=[1686854441162162144]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1527[1686853686459459448,1686854918102788682] 1686936871.55s|----------------------------------------L0.1527-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686853686459459448,1686854441162162144] 1686936871.55s 61mb|------------------------L0.?-------------------------| "
- - "L0.?[1686854441162162145,1686854918102788682] 1686936871.55s 39mb |--------------L0.?--------------| "
- - "**** Simulation run 374, type=split(HighL0OverlapTotalBacklog)(split_times=[1686854441162162144]). 1 Input Files, 58mb total:"
- - "L1, all files 58mb "
- - "L1.1420[1686854243778378366,1686854986870270255] 1686932677.39s|----------------------------------------L1.1420-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 58mb total:"
- - "L1 "
- - "L1.?[1686854243778378366,1686854441162162144] 1686932677.39s 15mb|--------L1.?---------| "
- - "L1.?[1686854441162162145,1686854986870270255] 1686932677.39s 43mb |------------------------------L1.?------------------------------| "
- - "**** Simulation run 375, type=split(HighL0OverlapTotalBacklog)(split_times=[1686855311972972953]). 1 Input Files, 17mb total:"
- - "L0, all files 17mb "
- - "L0.1530[1686854819000000001,1686856142243243231] 1686936871.55s|----------------------------------------L0.1530-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 17mb total:"
- - "L0 "
- - "L0.?[1686854819000000001,1686855311972972953] 1686936871.55s 7mb|-------------L0.?--------------| "
- - "L0.?[1686855311972972954,1686856142243243231] 1686936871.55s 11mb |-------------------------L0.?-------------------------| "
- - "**** Simulation run 376, type=split(HighL0OverlapTotalBacklog)(split_times=[1686855311972972953]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1528[1686854918102788683,1686856149746117916] 1686936871.55s|----------------------------------------L0.1528-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686854918102788683,1686855311972972953] 1686936871.55s 32mb|-----------L0.?-----------| "
- - "L0.?[1686855311972972954,1686856149746117916] 1686936871.55s 68mb |---------------------------L0.?----------------------------| "
- - "**** Simulation run 377, type=split(HighL0OverlapTotalBacklog)(split_times=[1686855311972972953]). 1 Input Files, 33mb total:"
- - "L1, all files 33mb "
- - "L1.1426[1686855311077579812,1686855729962162145] 1686932677.39s|----------------------------------------L1.1426----------------------------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 33mb total:"
- - "L1 "
- - "L1.?[1686855311077579812,1686855311972972953] 1686932677.39s 72kb|L1.?| "
- - "L1.?[1686855311972972954,1686855729962162145] 1686932677.39s 33mb|-----------------------------------------L1.?------------------------------------------| "
- - "**** Simulation run 378, type=split(HighL0OverlapTotalBacklog)(split_times=[1686856182783783762]). 1 Input Files, 58mb total:"
- - "L1, all files 58mb "
- - "L1.1427[1686855729962162146,1686856473054054036] 1686932677.39s|----------------------------------------L1.1427-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 58mb total:"
- - "L1 "
- - "L1.?[1686855729962162146,1686856182783783762] 1686932677.39s 35mb|------------------------L1.?------------------------| "
- - "L1.?[1686856182783783763,1686856473054054036] 1686932677.39s 23mb |--------------L1.?---------------| "
- - "**** Simulation run 379, type=split(HighL0OverlapTotalBacklog)(split_times=[1686856182783783762]). 1 Input Files, 4mb total:"
- - "L0, all files 4mb "
- - "L0.1531[1686856142243243232,1686856473054054039] 1686936871.55s|----------------------------------------L0.1531-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 4mb total:"
- - "L0 "
- - "L0.?[1686856142243243232,1686856182783783762] 1686936871.55s 549kb|--L0.?---| "
- - "L0.?[1686856182783783763,1686856473054054039] 1686936871.55s 4mb |------------------------------------L0.?------------------------------------| "
- - "**** Simulation run 380, type=split(HighL0OverlapTotalBacklog)(split_times=[1686856182783783762]). 1 Input Files, 26mb total:"
- - "L0, all files 26mb "
- - "L0.1529[1686856149746117917,1686856473054054039] 1686936871.55s|----------------------------------------L0.1529-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 26mb total:"
- - "L0 "
- - "L0.?[1686856149746117917,1686856182783783762] 1686936871.55s 3mb|-L0.?--| "
- - "L0.?[1686856182783783763,1686856473054054039] 1686936871.55s 24mb |-------------------------------------L0.?-------------------------------------| "
- - "**** Simulation run 381, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857053594594571]). 1 Input Files, 59mb total:"
- - "L1, all files 59mb "
- - "L1.1443[1686856473054054037,1686857216145945927] 1686932677.39s|----------------------------------------L1.1443-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 59mb total:"
- - "L1 "
- - "L1.?[1686856473054054037,1686857053594594571] 1686932677.39s 46mb|--------------------------------L1.?--------------------------------| "
- - "L1.?[1686857053594594572,1686857216145945927] 1686932677.39s 13mb |------L1.?-------| "
- - "**** Simulation run 382, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857053594594571]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1532[1686856473054054040,1686857679795016021] 1686935947.46s|----------------------------------------L0.1532-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686856473054054040,1686857053594594571] 1686935947.46s 48mb|------------------L0.?-------------------| "
- - "L0.?[1686857053594594572,1686857679795016021] 1686935947.46s 52mb |--------------------L0.?--------------------| "
- - "**** Simulation run 383, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857053594594571, 1686857924405405380, 1686858795216216189]). 1 Input Files, 27mb total:"
- - "L0, all files 27mb "
- - "L0.1535[1686856473054054040,1686859027432432416] 1686936871.55s|----------------------------------------L0.1535-----------------------------------------|"
- - "**** 4 Output Files (parquet_file_id not yet assigned), 27mb total:"
- - "L0 "
- - "L0.?[1686856473054054040,1686857053594594571] 1686936871.55s 6mb|-------L0.?-------| "
- - "L0.?[1686857053594594572,1686857924405405380] 1686936871.55s 9mb |------------L0.?------------| "
- - "L0.?[1686857924405405381,1686858795216216189] 1686936871.55s 9mb |------------L0.?------------| "
- - "L0.?[1686858795216216190,1686859027432432416] 1686936871.55s 2mb |-L0.?-| "
- - "**** Simulation run 384, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857924405405380, 1686858795216216189]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1533[1686857679795016022,1686858886535978002] 1686935947.46s|----------------------------------------L0.1533-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686857679795016022,1686857924405405380] 1686935947.46s 20mb|------L0.?------| "
- - "L0.?[1686857924405405381,1686858795216216189] 1686935947.46s 72mb |-----------------------------L0.?-----------------------------| "
- - "L0.?[1686858795216216190,1686858886535978002] 1686935947.46s 8mb |L0.?| "
- - "**** Simulation run 385, type=split(HighL0OverlapTotalBacklog)(split_times=[1686857924405405380]). 1 Input Files, 19mb total:"
- - "L1, all files 19mb "
- - "L1.1449[1686857724225846698,1686857959237837817] 1686932677.39s|----------------------------------------L1.1449----------------------------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 19mb total:"
- - "L1 "
- - "L1.?[1686857724225846698,1686857924405405380] 1686932677.39s 16mb|-----------------------------------L1.?-----------------------------------| "
- - "L1.?[1686857924405405381,1686857959237837817] 1686932677.39s 3mb |---L1.?----| "
- - "**** Simulation run 386, type=split(HighL0OverlapTotalBacklog)(split_times=[1686858795216216189]). 1 Input Files, 22mb total:"
- - "L1, all files 22mb "
- - "L1.1451[1686858702329729708,1686858975397639357] 1686932677.39s|----------------------------------------L1.1451-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 22mb total:"
- - "L1 "
- - "L1.?[1686858702329729708,1686858795216216189] 1686932677.39s 7mb|------------L1.?------------| "
- - "L1.?[1686858795216216190,1686858975397639357] 1686932677.39s 14mb |--------------------------L1.?---------------------------| "
- - "**** Simulation run 387, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859666027026998]). 1 Input Files, 7mb total:"
- - "L0, all files 7mb "
- - "L0.1536[1686859027432432417,1686859666027027010] 1686936871.55s|----------------------------------------L0.1536-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 7mb total:"
- - "L0 "
- - "L0.?[1686859027432432417,1686859666027026998] 1686936871.55s 7mb|-----------------------------------------L0.?------------------------------------------| "
- - "L0.?[1686859666027026999,1686859666027027010] 1686936871.55s 1b |L0.?|"
- - "**** Simulation run 388, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859666027026998]). 1 Input Files, 553kb total:"
- - "L0, all files 553kb "
- - "L0.1537[1686859499000000001,1686859666027027010] 1686936871.55s|----------------------------------------L0.1537-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 553kb total:"
- - "L0 "
- - "L0.?[1686859499000000001,1686859666027026998] 1686936871.55s 553kb|-----------------------------------------L0.?------------------------------------------| "
- - "L0.?[1686859666027026999,1686859666027027010] 1686936871.55s 1b |L0.?|"
- - "**** Simulation run 389, type=split(HighL0OverlapTotalBacklog)(split_times=[1686859666027026998]). 1 Input Files, 40mb total:"
- - "L1, all files 40mb "
- - "L1.1480[1686859589566760130,1686860002800686531] 1686932677.39s|----------------------------------------L1.1480-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 40mb total:"
- - "L1 "
- - "L1.?[1686859589566760130,1686859666027026998] 1686932677.39s 7mb|-----L1.?-----| "
- - "L1.?[1686859666027026999,1686860002800686531] 1686932677.39s 33mb |---------------------------------L1.?----------------------------------| "
- - "**** Simulation run 390, type=split(HighL0OverlapTotalBacklog)(split_times=[1686860536837837807]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1538[1686859666027027011,1686860866085039343] 1686936871.55s|----------------------------------------L0.1538-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686859666027027011,1686860536837837807] 1686936871.55s 73mb|-----------------------------L0.?------------------------------| "
- - "L0.?[1686860536837837808,1686860866085039343] 1686936871.55s 27mb |---------L0.?---------| "
- - "**** Simulation run 391, type=split(HighL0OverlapTotalBacklog)(split_times=[1686860536837837807, 1686861407648648616, 1686862278459459425]). 1 Input Files, 12mb total:"
- - "L0, all files 12mb "
- - "L0.1541[1686860002800686532,1686862975108108077] 1686936871.55s|----------------------------------------L0.1541-----------------------------------------|"
- - "**** 4 Output Files (parquet_file_id not yet assigned), 12mb total:"
- - "L0 "
- - "L0.?[1686860002800686532,1686860536837837807] 1686936871.55s 2mb|-----L0.?-----| "
- - "L0.?[1686860536837837808,1686861407648648616] 1686936871.55s 4mb |----------L0.?----------| "
- - "L0.?[1686861407648648617,1686862278459459425] 1686936871.55s 4mb |----------L0.?----------| "
- - "L0.?[1686862278459459426,1686862975108108077] 1686936871.55s 3mb |-------L0.?--------| "
- - "**** Simulation run 392, type=split(HighL0OverlapTotalBacklog)(split_times=[1686860536837837807]). 1 Input Files, 60mb total:"
- - "L1, all files 60mb "
- - "L1.1486[1686860203735880901,1686860817905001671] 1686932677.39s|----------------------------------------L1.1486-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 60mb total:"
- - "L1 "
- - "L1.?[1686860203735880901,1686860536837837807] 1686932677.39s 32mb|---------------------L1.?---------------------| "
- - "L1.?[1686860536837837808,1686860817905001671] 1686932677.39s 27mb |-----------------L1.?------------------| "
- - "**** Simulation run 393, type=split(HighL0OverlapTotalBacklog)(split_times=[1686861407648648616]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1539[1686860866085039344,1686862066143051675] 1686936871.55s|----------------------------------------L0.1539-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686860866085039344,1686861407648648616] 1686936871.55s 45mb|-----------------L0.?-----------------| "
- - "L0.?[1686861407648648617,1686862066143051675] 1686936871.55s 55mb |---------------------L0.?----------------------| "
- - "**** Simulation run 394, type=split(HighL0OverlapTotalBacklog)(split_times=[1686861407648648616]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1512[1686861030203733705,1686862070968521403] 1686932677.39s|----------------------------------------L1.1512-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686861030203733705,1686861407648648616] 1686932677.39s 36mb|-------------L1.?-------------| "
- - "L1.?[1686861407648648617,1686862070968521403] 1686932677.39s 64mb |-------------------------L1.?--------------------------| "
- - "**** Simulation run 395, type=split(HighL0OverlapTotalBacklog)(split_times=[1686862278459459425]). 1 Input Files, 76mb total:"
- - "L0, all files 76mb "
- - "L0.1540[1686862066143051676,1686862975108108077] 1686936871.55s|----------------------------------------L0.1540-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 76mb total:"
- - "L0 "
- - "L0.?[1686862066143051676,1686862278459459425] 1686936871.55s 18mb|-------L0.?--------| "
- - "L0.?[1686862278459459426,1686862975108108077] 1686936871.55s 58mb |-------------------------------L0.?-------------------------------| "
- - "**** Simulation run 396, type=split(HighL0OverlapTotalBacklog)(split_times=[1686862278459459425]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1513[1686862070968521404,1686863111733309101] 1686932677.39s|----------------------------------------L1.1513-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686862070968521404,1686862278459459425] 1686932677.39s 20mb|-----L1.?------| "
- - "L1.?[1686862278459459426,1686863111733309101] 1686932677.39s 80mb |---------------------------------L1.?---------------------------------| "
- - "**** Simulation run 397, type=split(HighL0OverlapTotalBacklog)(split_times=[1686863149270270234]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1542[1686862975108108078,1686863763854983772] 1686936871.55s|----------------------------------------L0.1542-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686862975108108078,1686863149270270234] 1686936871.55s 22mb|------L0.?-------| "
- - "L0.?[1686863149270270235,1686863763854983772] 1686936871.55s 78mb |--------------------------------L0.?--------------------------------| "
- - "**** Simulation run 398, type=split(HighL0OverlapTotalBacklog)(split_times=[1686863149270270234]). 1 Input Files, 76mb total:"
- - "L1, all files 76mb "
- - "L1.1514[1686863111733309102,1686863903972972940] 1686932677.39s|----------------------------------------L1.1514-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 76mb total:"
- - "L1 "
- - "L1.?[1686863111733309102,1686863149270270234] 1686932677.39s 4mb|L1.?| "
- - "L1.?[1686863149270270235,1686863903972972940] 1686932677.39s 73mb |---------------------------------------L1.?----------------------------------------| "
- - "**** Simulation run 399, type=split(HighL0OverlapTotalBacklog)(split_times=[1686864020081081043]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1543[1686863763854983773,1686864552601859466] 1686936871.55s|----------------------------------------L0.1543-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686863763854983773,1686864020081081043] 1686936871.55s 32mb|-----------L0.?------------| "
- - "L0.?[1686864020081081044,1686864552601859466] 1686936871.55s 68mb |---------------------------L0.?---------------------------| "
- - "**** Simulation run 400, type=split(HighL0OverlapTotalBacklog)(split_times=[1686864020081081043]). 1 Input Files, 12mb total:"
- - "L1, all files 12mb "
- - "L1.1280[1686863903972972941,1686864651607515459] 1686931893.7s|----------------------------------------L1.1280-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 12mb total:"
- - "L1 "
- - "L1.?[1686863903972972941,1686864020081081043] 1686931893.7s 2mb|---L1.?----| "
- - "L1.?[1686864020081081044,1686864651607515459] 1686931893.7s 10mb |-----------------------------------L1.?-----------------------------------| "
- - "**** Simulation run 401, type=split(HighL0OverlapTotalBacklog)(split_times=[1686864890891891852]). 1 Input Files, 43mb total:"
- - "L0, all files 43mb "
- - "L0.1544[1686864552601859467,1686864890891891870] 1686936871.55s|----------------------------------------L0.1544-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 43mb total:"
- - "L0 "
- - "L0.?[1686864552601859467,1686864890891891852] 1686936871.55s 43mb|-----------------------------------------L0.?------------------------------------------| "
- - "L0.?[1686864890891891853,1686864890891891870] 1686936871.55s 1b |L0.?|"
- - "**** Simulation run 402, type=split(HighL0OverlapTotalBacklog)(split_times=[1686864890891891852]). 1 Input Files, 8mb total:"
- - "L0, all files 8mb "
- - "L0.1545[1686864651607515460,1686864890891891870] 1686936871.55s|----------------------------------------L0.1545----------------------------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 8mb total:"
- - "L0 "
- - "L0.?[1686864651607515460,1686864890891891852] 1686936871.55s 8mb|-----------------------------------------L0.?------------------------------------------| "
- - "L0.?[1686864890891891853,1686864890891891870] 1686936871.55s 1b |L0.?|"
- - "**** Simulation run 403, type=split(HighL0OverlapTotalBacklog)(split_times=[1686864890891891852, 1686865761702702661]). 1 Input Files, 14mb total:"
- - "L1, all files 14mb "
- - "L1.1326[1686864832837837804,1686865761702702680] 1686931893.7s|----------------------------------------L1.1326-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 14mb total:"
- - "L1 "
- - "L1.?[1686864832837837804,1686864890891891852] 1686931893.7s 927kb|L1.?| "
- - "L1.?[1686864890891891853,1686865761702702661] 1686931893.7s 14mb |---------------------------------------L1.?---------------------------------------| "
- - "L1.?[1686865761702702662,1686865761702702680] 1686931893.7s 1b |L1.?|"
- - "**** Simulation run 404, type=split(HighL0OverlapTotalBacklog)(split_times=[1686865761702702661]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1547[1686865715561243056,1686866540230594239] 1686936871.55s|----------------------------------------L0.1547-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686865715561243056,1686865761702702661] 1686936871.55s 6mb|L0.?| "
- - "L0.?[1686865761702702662,1686866540230594239] 1686936871.55s 94mb |---------------------------------------L0.?---------------------------------------| "
- - "**** Simulation run 405, type=split(HighL0OverlapTotalBacklog)(split_times=[1686868917918918910]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1549[1686868319000000001,1686868931238859876] 1686936871.55s|----------------------------------------L0.1549-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686868319000000001,1686868917918918910] 1686936871.55s 98mb|-----------------------------------------L0.?-----------------------------------------| "
- - "L0.?[1686868917918918911,1686868931238859876] 1686936871.55s 2mb |L0.?|"
- - "**** Simulation run 406, type=split(HighL0OverlapTotalBacklog)(split_times=[1686868917918918910]). 1 Input Files, 15mb total:"
- - "L1, all files 15mb "
- - "L1.905[1686868379000000000,1686869244945945920] 1686928854.57s|-----------------------------------------L1.905-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 15mb total:"
- - "L1 "
- - "L1.?[1686868379000000000,1686868917918918910] 1686928854.57s 9mb|-------------------------L1.?-------------------------| "
- - "L1.?[1686868917918918911,1686869244945945920] 1686928854.57s 6mb |-------------L1.?--------------| "
- - "**** Simulation run 407, type=split(HighL0OverlapTotalBacklog)(split_times=[1686869516837837819]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1550[1686868931238859877,1686869543477719751] 1686936871.55s|----------------------------------------L0.1550-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686868931238859877,1686869516837837819] 1686936871.55s 96mb|----------------------------------------L0.?----------------------------------------| "
- - "L0.?[1686869516837837820,1686869543477719751] 1686936871.55s 4mb |L0.?|"
- - "**** Simulation run 408, type=split(HighL0OverlapTotalBacklog)(split_times=[1686869516837837819]). 1 Input Files, 15mb total:"
- - "L1, all files 15mb "
- - "L1.906[1686869244945945921,1686870115756756730] 1686928854.57s|-----------------------------------------L1.906-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 15mb total:"
- - "L1 "
- - "L1.?[1686869244945945921,1686869516837837819] 1686928854.57s 5mb|-----------L1.?-----------| "
- - "L1.?[1686869516837837820,1686870115756756730] 1686928854.57s 10mb |---------------------------L1.?----------------------------| "
- - "**** Simulation run 409, type=split(HighL0OverlapTotalBacklog)(split_times=[1686870986567567540]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1553[1686870677571828434,1686871239386900135] 1686936871.55s|----------------------------------------L0.1553-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686870677571828434,1686870986567567540] 1686936871.55s 55mb|---------------------L0.?----------------------| "
- - "L0.?[1686870986567567541,1686871239386900135] 1686936871.55s 45mb |-----------------L0.?-----------------| "
- - "**** Simulation run 410, type=split(HighL0OverlapTotalBacklog)(split_times=[1686871857378378349]). 1 Input Files, 15mb total:"
- - "L1, all files 15mb "
- - "L1.908[1686870986567567541,1686871857378378350] 1686928854.57s|-----------------------------------------L1.908-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 15mb total:"
- - "L1 "
- - "L1.?[1686870986567567541,1686871857378378349] 1686928854.57s 15mb|-----------------------------------------L1.?------------------------------------------| "
- - "L1.?[1686871857378378350,1686871857378378350] 1686928854.57s 1b |L1.?|"
- - "**** Simulation run 411, type=split(HighL0OverlapTotalBacklog)(split_times=[1686871857378378349]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1555[1686871550514515285,1686872106056369133] 1686936871.55s|----------------------------------------L0.1555-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686871550514515285,1686871857378378349] 1686936871.55s 55mb|---------------------L0.?----------------------| "
- - "L0.?[1686871857378378350,1686872106056369133] 1686936871.55s 45mb |-----------------L0.?-----------------| "
- - "Committing partition 1:"
- - " Soft Deleting 54 files: L1.905, L1.906, L1.908, L1.1230, L1.1280, L1.1326, L1.1385, L1.1386, L1.1392, L1.1410, L1.1420, L1.1426, L1.1427, L1.1443, L1.1449, L1.1451, L1.1480, L1.1486, L1.1512, L1.1513, L1.1514, L0.1515, L0.1516, L0.1518, L0.1519, L0.1521, L0.1522, L0.1523, L0.1524, L0.1525, L0.1526, L0.1527, L0.1528, L0.1529, L0.1530, L0.1531, L0.1532, L0.1533, L0.1535, L0.1536, L0.1537, L0.1538, L0.1539, L0.1540, L0.1541, L0.1542, L0.1543, L0.1544, L0.1545, L0.1547, L0.1549, L0.1550, L0.1553, L0.1555"
- - " Creating 115 files"
- - "**** Simulation run 412, type=split(ReduceOverlap)(split_times=[1686857216145945927]). 1 Input Files, 52mb total:"
- - "L0, all files 52mb "
- - "L0.1613[1686857053594594572,1686857679795016021] 1686935947.46s|----------------------------------------L0.1613-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 52mb total:"
- - "L0 "
- - "L0.?[1686857053594594572,1686857216145945927] 1686935947.46s 13mb|--------L0.?---------| "
- - "L0.?[1686857216145945928,1686857679795016021] 1686935947.46s 38mb |------------------------------L0.?------------------------------| "
- - "**** Simulation run 413, type=split(ReduceOverlap)(split_times=[1686857724225846697]). 1 Input Files, 20mb total:"
- - "L0, all files 20mb "
- - "L0.1618[1686857679795016022,1686857924405405380] 1686935947.46s|----------------------------------------L0.1618----------------------------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 20mb total:"
- - "L0 "
- - "L0.?[1686857679795016022,1686857724225846697] 1686935947.46s 4mb|-----L0.?-----| "
- - "L0.?[1686857724225846698,1686857924405405380] 1686935947.46s 17mb |---------------------------------L0.?----------------------------------| "
- - "**** Simulation run 414, type=split(ReduceOverlap)(split_times=[1686857959237837817, 1686858702329729707]). 1 Input Files, 72mb total:"
- - "L0, all files 72mb "
- - "L0.1619[1686857924405405381,1686858795216216189] 1686935947.46s|----------------------------------------L0.1619-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 72mb total:"
- - "L0 "
- - "L0.?[1686857924405405381,1686857959237837817] 1686935947.46s 3mb|L0.?| "
- - "L0.?[1686857959237837818,1686858702329729707] 1686935947.46s 62mb |-----------------------------------L0.?-----------------------------------| "
- - "L0.?[1686858702329729708,1686858795216216189] 1686935947.46s 8mb |-L0.?--| "
- - "**** Simulation run 415, type=split(ReduceOverlap)(split_times=[1686858975397639357]). 1 Input Files, 51mb total:"
- - "L0, all files 51mb "
- - "L0.1534[1686858886535978003,1686859499000000000] 1686935947.46s|----------------------------------------L0.1534-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 51mb total:"
- - "L0 "
- - "L0.?[1686858886535978003,1686858975397639357] 1686935947.46s 7mb|---L0.?----| "
- - "L0.?[1686858975397639358,1686859499000000000] 1686935947.46s 43mb |-----------------------------------L0.?-----------------------------------| "
- - "**** Simulation run 416, type=split(ReduceOverlap)(split_times=[1686842571022320444]). 1 Input Files, 99mb total:"
- - "L0, all files 99mb "
- - "L0.1565[1686842547242379894,1686843701162162160] 1686936871.55s|----------------------------------------L0.1565-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 99mb total:"
- - "L0 "
- - "L0.?[1686842547242379894,1686842571022320444] 1686936871.55s 2mb|L0.?| "
- - "L0.?[1686842571022320445,1686843701162162160] 1686936871.55s 97mb |-----------------------------------------L0.?-----------------------------------------| "
- - "**** Simulation run 417, type=split(ReduceOverlap)(split_times=[1686843763044640888]). 1 Input Files, 98mb total:"
- - "L0, all files 98mb "
- - "L0.1517[1686843715484759787,1686844862243243240] 1686936871.55s|----------------------------------------L0.1517-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 98mb total:"
- - "L0 "
- - "L0.?[1686843715484759787,1686843763044640888] 1686936871.55s 4mb|L0.?| "
- - "L0.?[1686843763044640889,1686844862243243240] 1686936871.55s 94mb |----------------------------------------L0.?----------------------------------------| "
- - "**** Simulation run 418, type=split(ReduceOverlap)(split_times=[1686846054770701976]). 1 Input Files, 99mb total:"
- - "L0, all files 99mb "
- - "L0.1569[1686846030485623134,1686847184405405399] 1686936871.55s|----------------------------------------L0.1569-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 99mb total:"
- - "L0 "
- - "L0.?[1686846030485623134,1686846054770701976] 1686936871.55s 2mb|L0.?| "
- - "L0.?[1686846054770701977,1686847184405405399] 1686936871.55s 97mb |-----------------------------------------L0.?-----------------------------------------| "
- - "**** Simulation run 419, type=split(ReduceOverlap)(split_times=[1686847247298160711]). 1 Input Files, 98mb total:"
- - "L0, all files 98mb "
- - "L0.1520[1686847198728003026,1686848345486486480] 1686936871.55s|----------------------------------------L0.1520-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 98mb total:"
- - "L0 "
- - "L0.?[1686847198728003026,1686847247298160711] 1686936871.55s 4mb|L0.?| "
- - "L0.?[1686847247298160712,1686848345486486480] 1686936871.55s 94mb |----------------------------------------L0.?----------------------------------------| "
- - "**** Simulation run 420, type=split(ReduceOverlap)(split_times=[1686849506567567560, 1686849559289331160]). 1 Input Files, 52mb total:"
- - "L0, all files 52mb "
- - "L0.1575[1686849491497710571,1686850087108108099] 1686936871.55s|----------------------------------------L0.1575-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 52mb total:"
- - "L0 "
- - "L0.?[1686849491497710571,1686849506567567560] 1686936871.55s 1mb|L0.?| "
- - "L0.?[1686849506567567561,1686849559289331160] 1686936871.55s 5mb |L0.?-| "
- - "L0.?[1686849559289331161,1686850087108108099] 1686936871.55s 46mb |------------------------------------L0.?-------------------------------------| "
- - "**** Simulation run 421, type=split(ReduceOverlap)(split_times=[1686850667648648639, 1686850773092175839]). 1 Input Files, 28mb total:"
- - "L0, all files 28mb "
- - "L0.1579[1686850637508934660,1686850957918918908] 1686936871.55s|----------------------------------------L0.1579-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 28mb total:"
- - "L0 "
- - "L0.?[1686850637508934660,1686850667648648639] 1686936871.55s 3mb|-L0.?-| "
- - "L0.?[1686850667648648640,1686850773092175839] 1686936871.55s 9mb |-----------L0.?------------| "
- - "L0.?[1686850773092175840,1686850957918918908] 1686936871.55s 16mb |----------------------L0.?-----------------------| "
- - "**** Simulation run 422, type=split(ReduceOverlap)(split_times=[1686851828729729720]). 1 Input Files, 12mb total:"
- - "L0, all files 12mb "
- - "L0.1585[1686851828729729718,1686851943085513215] 1686936871.55s|----------------------------------------L0.1585----------------------------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 12mb total:"
- - "L0 "
- - "L0.?[1686851828729729718,1686851828729729720] 1686936871.55s 0b|L0.?| "
- - "L0.?[1686851828729729721,1686851943085513215] 1686936871.55s 12mb|-----------------------------------------L0.?------------------------------------------| "
- - "**** Simulation run 423, type=split(ReduceOverlap)(split_times=[1686852757594594584]). 1 Input Files, 23mb total:"
- - "L0, all files 23mb "
- - "L0.1589[1686852699540540527,1686852928252107519] 1686936871.55s|----------------------------------------L0.1589-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 23mb total:"
- - "L0 "
- - "L0.?[1686852699540540527,1686852757594594584] 1686936871.55s 6mb|--------L0.?--------| "
- - "L0.?[1686852757594594585,1686852928252107519] 1686936871.55s 17mb |------------------------------L0.?-------------------------------| "
- - "**** Simulation run 424, type=split(ReduceOverlap)(split_times=[1686853500686486475]). 1 Input Files, 65mb total:"
- - "L0, all files 65mb "
- - "L0.1590[1686852928252107520,1686853570351351335] 1686936871.55s|----------------------------------------L0.1590-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 65mb total:"
- - "L0 "
- - "L0.?[1686852928252107520,1686853500686486475] 1686936871.55s 58mb|-------------------------------------L0.?-------------------------------------| "
- - "L0.?[1686853500686486476,1686853570351351335] 1686936871.55s 7mb |-L0.?--| "
- - "**** Simulation run 425, type=split(ReduceOverlap)(split_times=[1686854034336087198, 1686854243778378365]). 1 Input Files, 61mb total:"
- - "L0, all files 61mb "
- - "L0.1594[1686853686459459448,1686854441162162144] 1686936871.55s|----------------------------------------L0.1594-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 61mb total:"
- - "L0 "
- - "L0.?[1686853686459459448,1686854034336087198] 1686936871.55s 28mb|-----------------L0.?------------------| "
- - "L0.?[1686854034336087199,1686854243778378365] 1686936871.55s 17mb |---------L0.?---------| "
- - "L0.?[1686854243778378366,1686854441162162144] 1686936871.55s 16mb |--------L0.?---------| "
- - "**** Simulation run 426, type=split(ReduceOverlap)(split_times=[1686854986870270255, 1686855311077579811]). 1 Input Files, 7mb total:"
- - "L0, all files 7mb "
- - "L0.1598[1686854819000000001,1686855311972972953] 1686936871.55s|----------------------------------------L0.1598-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 7mb total:"
- - "L0 "
- - "L0.?[1686854819000000001,1686854986870270255] 1686936871.55s 2mb|------------L0.?------------| "
- - "L0.?[1686854986870270256,1686855311077579811] 1686936871.55s 4mb |--------------------------L0.?---------------------------| "
- - "L0.?[1686855311077579812,1686855311972972953] 1686936871.55s 12kb |L0.?|"
- - "**** Simulation run 427, type=split(ReduceOverlap)(split_times=[1686854986870270255, 1686855311077579811]). 1 Input Files, 32mb total:"
- - "L0, all files 32mb "
- - "L0.1600[1686854918102788683,1686855311972972953] 1686936871.55s|----------------------------------------L0.1600-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 32mb total:"
- - "L0 "
- - "L0.?[1686854918102788683,1686854986870270255] 1686936871.55s 6mb|----L0.?-----| "
- - "L0.?[1686854986870270256,1686855311077579811] 1686936871.55s 26mb |----------------------------------L0.?----------------------------------| "
- - "L0.?[1686855311077579812,1686855311972972953] 1686936871.55s 74kb |L0.?|"
- - "**** Simulation run 428, type=split(ReduceOverlap)(split_times=[1686855729962162145]). 1 Input Files, 68mb total:"
- - "L0, all files 68mb "
- - "L0.1601[1686855311972972954,1686856149746117916] 1686936871.55s|----------------------------------------L0.1601-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 68mb total:"
- - "L0 "
- - "L0.?[1686855311972972954,1686855729962162145] 1686936871.55s 34mb|-------------------L0.?-------------------| "
- - "L0.?[1686855729962162146,1686856149746117916] 1686936871.55s 34mb |-------------------L0.?--------------------| "
- - "**** Simulation run 429, type=split(ReduceOverlap)(split_times=[1686855729962162145]). 1 Input Files, 11mb total:"
- - "L0, all files 11mb "
- - "L0.1599[1686855311972972954,1686856142243243231] 1686936871.55s|----------------------------------------L0.1599-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 11mb total:"
- - "L0 "
- - "L0.?[1686855311972972954,1686855729962162145] 1686936871.55s 6mb|-------------------L0.?--------------------| "
- - "L0.?[1686855729962162146,1686856142243243231] 1686936871.55s 5mb |-------------------L0.?-------------------| "
- - "**** Simulation run 430, type=split(ReduceOverlap)(split_times=[1686856473054054036]). 1 Input Files, 24mb total:"
- - "L0, all files 24mb "
- - "L0.1609[1686856182783783763,1686856473054054039] 1686936871.55s|----------------------------------------L0.1609-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 24mb total:"
- - "L0 "
- - "L0.?[1686856182783783763,1686856473054054036] 1686936871.55s 24mb|-----------------------------------------L0.?------------------------------------------| "
- - "L0.?[1686856473054054037,1686856473054054039] 1686936871.55s 1b |L0.?|"
- - "**** Simulation run 431, type=split(ReduceOverlap)(split_times=[1686856473054054036]). 1 Input Files, 4mb total:"
- - "L0, all files 4mb "
- - "L0.1607[1686856182783783763,1686856473054054039] 1686936871.55s|----------------------------------------L0.1607-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 4mb total:"
- - "L0 "
- - "L0.?[1686856182783783763,1686856473054054036] 1686936871.55s 4mb|-----------------------------------------L0.?------------------------------------------| "
- - "L0.?[1686856473054054037,1686856473054054039] 1686936871.55s 1b |L0.?|"
- - "**** Simulation run 432, type=split(ReduceOverlap)(split_times=[1686857216145945927, 1686857724225846697]). 1 Input Files, 9mb total:"
- - "L0, all files 9mb "
- - "L0.1615[1686857053594594572,1686857924405405380] 1686936871.55s|----------------------------------------L0.1615-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 9mb total:"
- - "L0 "
- - "L0.?[1686857053594594572,1686857216145945927] 1686936871.55s 2mb|-----L0.?-----| "
- - "L0.?[1686857216145945928,1686857724225846697] 1686936871.55s 5mb |-----------------------L0.?-----------------------| "
- - "L0.?[1686857724225846698,1686857924405405380] 1686936871.55s 2mb |-------L0.?-------| "
- - "**** Simulation run 433, type=split(ReduceOverlap)(split_times=[1686857959237837817, 1686858702329729707]). 1 Input Files, 9mb total:"
- - "L0, all files 9mb "
- - "L0.1616[1686857924405405381,1686858795216216189] 1686936871.55s|----------------------------------------L0.1616-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 9mb total:"
- - "L0 "
- - "L0.?[1686857924405405381,1686857959237837817] 1686936871.55s 374kb|L0.?| "
- - "L0.?[1686857959237837818,1686858702329729707] 1686936871.55s 8mb |-----------------------------------L0.?-----------------------------------| "
- - "L0.?[1686858702329729708,1686858795216216189] 1686936871.55s 998kb |-L0.?--| "
- - "**** Simulation run 434, type=split(ReduceOverlap)(split_times=[1686858975397639357]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1617[1686858795216216190,1686859027432432416] 1686936871.55s|----------------------------------------L0.1617-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[1686858795216216190,1686858975397639357] 1686936871.55s 2mb|-------------------------------L0.?--------------------------------| "
- - "L0.?[1686858975397639358,1686859027432432416] 1686936871.55s 559kb |-------L0.?-------| "
- - "**** Simulation run 435, type=split(ReduceOverlap)(split_times=[1686859589566760129]). 1 Input Files, 7mb total:"
- - "L0, all files 7mb "
- - "L0.1625[1686859027432432417,1686859666027026998] 1686936871.55s|----------------------------------------L0.1625-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 7mb total:"
- - "L0 "
- - "L0.?[1686859027432432417,1686859589566760129] 1686936871.55s 6mb|------------------------------------L0.?-------------------------------------| "
- - "L0.?[1686859589566760130,1686859666027026998] 1686936871.55s 821kb |--L0.?--| "
- - "**** Simulation run 436, type=split(ReduceOverlap)(split_times=[1686859589566760129]). 1 Input Files, 553kb total:"
- - "L0, all files 553kb "
- - "L0.1627[1686859499000000001,1686859666027026998] 1686936871.55s|----------------------------------------L0.1627-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 553kb total:"
- - "L0 "
- - "L0.?[1686859499000000001,1686859589566760129] 1686936871.55s 300kb|---------------------L0.?---------------------| "
- - "L0.?[1686859589566760130,1686859666027026998] 1686936871.55s 253kb |-----------------L0.?------------------| "
- - "**** Simulation run 437, type=split(ReduceOverlap)(split_times=[1686860002800686531, 1686860203735880900]). 1 Input Files, 73mb total:"
- - "L0, all files 73mb "
- - "L0.1631[1686859666027027011,1686860536837837807] 1686936871.55s|----------------------------------------L0.1631-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 73mb total:"
- - "L0 "
- - "L0.?[1686859666027027011,1686860002800686531] 1686936871.55s 28mb|--------------L0.?--------------| "
- - "L0.?[1686860002800686532,1686860203735880900] 1686936871.55s 17mb |-------L0.?-------| "
- - "L0.?[1686860203735880901,1686860536837837807] 1686936871.55s 28mb |--------------L0.?--------------| "
- - "**** Simulation run 438, type=split(ReduceOverlap)(split_times=[1686860203735880900]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1633[1686860002800686532,1686860536837837807] 1686936871.55s|----------------------------------------L0.1633-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[1686860002800686532,1686860203735880900] 1686936871.55s 827kb|-------------L0.?--------------| "
- - "L0.?[1686860203735880901,1686860536837837807] 1686936871.55s 1mb |-------------------------L0.?-------------------------| "
- - "**** Simulation run 439, type=split(ReduceOverlap)(split_times=[1686860817905001671, 1686861030203733704]). 1 Input Files, 4mb total:"
- - "L0, all files 4mb "
- - "L0.1634[1686860536837837808,1686861407648648616] 1686936871.55s|----------------------------------------L0.1634-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 4mb total:"
- - "L0 "
- - "L0.?[1686860536837837808,1686860817905001671] 1686936871.55s 1mb|-----------L0.?------------| "
- - "L0.?[1686860817905001672,1686861030203733704] 1686936871.55s 874kb |-------L0.?--------| "
- - "L0.?[1686861030203733705,1686861407648648616] 1686936871.55s 2mb |----------------L0.?-----------------| "
- - "**** Simulation run 440, type=split(ReduceOverlap)(split_times=[1686860817905001671]). 1 Input Files, 27mb total:"
- - "L0, all files 27mb "
- - "L0.1632[1686860536837837808,1686860866085039343] 1686936871.55s|----------------------------------------L0.1632-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 27mb total:"
- - "L0 "
- - "L0.?[1686860536837837808,1686860817905001671] 1686936871.55s 23mb|-----------------------------------L0.?-----------------------------------| "
- - "L0.?[1686860817905001672,1686860866085039343] 1686936871.55s 4mb |---L0.?----| "
- - "**** Simulation run 441, type=split(ReduceOverlap)(split_times=[1686861030203733704]). 1 Input Files, 45mb total:"
- - "L0, all files 45mb "
- - "L0.1639[1686860866085039344,1686861407648648616] 1686936871.55s|----------------------------------------L0.1639-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 45mb total:"
- - "L0 "
- - "L0.?[1686860866085039344,1686861030203733704] 1686936871.55s 14mb|----------L0.?-----------| "
- - "L0.?[1686861030203733705,1686861407648648616] 1686936871.55s 31mb |----------------------------L0.?----------------------------| "
- - "**** Simulation run 442, type=split(ReduceOverlap)(split_times=[1686862070968521403]). 1 Input Files, 4mb total:"
- - "L0, all files 4mb "
- - "L0.1635[1686861407648648617,1686862278459459425] 1686936871.55s|----------------------------------------L0.1635-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 4mb total:"
- - "L0 "
- - "L0.?[1686861407648648617,1686862070968521403] 1686936871.55s 3mb|-------------------------------L0.?-------------------------------| "
- - "L0.?[1686862070968521404,1686862278459459425] 1686936871.55s 854kb |-------L0.?--------| "
- - "**** Simulation run 443, type=split(ReduceOverlap)(split_times=[1686862070968521403]). 1 Input Files, 18mb total:"
- - "L0, all files 18mb "
- - "L0.1643[1686862066143051676,1686862278459459425] 1686936871.55s|----------------------------------------L0.1643-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 18mb total:"
- - "L0 "
- - "L0.?[1686862066143051676,1686862070968521403] 1686936871.55s 412kb|L0.?| "
- - "L0.?[1686862070968521404,1686862278459459425] 1686936871.55s 17mb |----------------------------------------L0.?-----------------------------------------| "
- - "**** Simulation run 444, type=split(ReduceOverlap)(split_times=[1686863111733309101]). 1 Input Files, 22mb total:"
- - "L0, all files 22mb "
- - "L0.1647[1686862975108108078,1686863149270270234] 1686936871.55s|----------------------------------------L0.1647-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 22mb total:"
- - "L0 "
- - "L0.?[1686862975108108078,1686863111733309101] 1686936871.55s 17mb|--------------------------------L0.?--------------------------------| "
- - "L0.?[1686863111733309102,1686863149270270234] 1686936871.55s 5mb |------L0.?-------| "
- - "**** Simulation run 445, type=split(ReduceOverlap)(split_times=[1686863903972972940]). 1 Input Files, 32mb total:"
- - "L0, all files 32mb "
- - "L0.1651[1686863763854983773,1686864020081081043] 1686936871.55s|----------------------------------------L0.1651-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 32mb total:"
- - "L0 "
- - "L0.?[1686863763854983773,1686863903972972940] 1686936871.55s 18mb|---------------------L0.?----------------------| "
- - "L0.?[1686863903972972941,1686864020081081043] 1686936871.55s 15mb |-----------------L0.?-----------------| "
- - "**** Simulation run 446, type=split(ReduceOverlap)(split_times=[1686864651607515459, 1686864832837837803]). 1 Input Files, 43mb total:"
- - "L0, all files 43mb "
- - "L0.1655[1686864552601859467,1686864890891891852] 1686936871.55s|----------------------------------------L0.1655-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 43mb total:"
- - "L0 "
- - "L0.?[1686864552601859467,1686864651607515459] 1686936871.55s 13mb|----------L0.?----------| "
- - "L0.?[1686864651607515460,1686864832837837803] 1686936871.55s 23mb |---------------------L0.?---------------------| "
- - "L0.?[1686864832837837804,1686864890891891852] 1686936871.55s 7mb |----L0.?-----| "
- - "**** Simulation run 447, type=split(ReduceOverlap)(split_times=[1686864832837837803]). 1 Input Files, 8mb total:"
- - "L0, all files 8mb "
- - "L0.1657[1686864651607515460,1686864890891891852] 1686936871.55s|----------------------------------------L0.1657-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 8mb total:"
- - "L0 "
- - "L0.?[1686864651607515460,1686864832837837803] 1686936871.55s 6mb|-------------------------------L0.?-------------------------------| "
- - "L0.?[1686864832837837804,1686864890891891852] 1686936871.55s 2mb |-------L0.?--------| "
- - "**** Simulation run 448, type=split(ReduceOverlap)(split_times=[1686865761702702680]). 1 Input Files, 94mb total:"
- - "L0, all files 94mb "
- - "L0.1663[1686865761702702662,1686866540230594239] 1686936871.55s|----------------------------------------L0.1663----------------------------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 94mb total:"
- - "L0 "
- - "L0.?[1686865761702702662,1686865761702702680] 1686936871.55s 0b|L0.?| "
- - "L0.?[1686865761702702681,1686866540230594239] 1686936871.55s 94mb|-----------------------------------------L0.?------------------------------------------| "
- - "**** Simulation run 449, type=split(ReduceOverlap)(split_times=[1686867503324324300, 1686867659000000000, 1686867839000000000]). 1 Input Files, 100mb total:"
- - "L0, all files 100mb "
- - "L0.1561[1686867299592048858,1686867966670584223] 1686936871.55s|----------------------------------------L0.1561-----------------------------------------|"
- - "**** 4 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L0 "
- - "L0.?[1686867299592048858,1686867503324324300] 1686936871.55s 31mb|----------L0.?-----------| "
- - "L0.?[1686867503324324301,1686867659000000000] 1686936871.55s 23mb |-------L0.?--------| "
- - "L0.?[1686867659000000001,1686867839000000000] 1686936871.55s 27mb |---------L0.?---------| "
- - "L0.?[1686867839000000001,1686867966670584223] 1686936871.55s 19mb |-----L0.?------| "
- - "**** Simulation run 450, type=split(ReduceOverlap)(split_times=[1686869244945945920]). 1 Input Files, 96mb total:"
- - "L0, all files 96mb "
- - "L0.1668[1686868931238859877,1686869516837837819] 1686936871.55s|----------------------------------------L0.1668-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 96mb total:"
- - "L0 "
- - "L0.?[1686868931238859877,1686869244945945920] 1686936871.55s 51mb|---------------------L0.?---------------------| "
- - "L0.?[1686869244945945921,1686869516837837819] 1686936871.55s 44mb |-----------------L0.?------------------| "
- - "**** Simulation run 451, type=split(ReduceOverlap)(split_times=[1686871857378378350]). 1 Input Files, 45mb total:"
- - "L0, all files 45mb "
- - "L0.1677[1686871857378378350,1686872106056369133] 1686936871.55s|----------------------------------------L0.1677-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 45mb total:"
- - "L0 "
- - "L0.?[1686871857378378350,1686871857378378350] 1686936871.55s 0b|L0.?| "
- - "L0.?[1686871857378378351,1686872106056369133] 1686936871.55s 45mb|-----------------------------------------L0.?------------------------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 40 files: L0.1517, L0.1520, L0.1534, L0.1561, L0.1565, L0.1569, L0.1575, L0.1579, L0.1585, L0.1589, L0.1590, L0.1594, L0.1598, L0.1599, L0.1600, L0.1601, L0.1607, L0.1609, L0.1613, L0.1615, L0.1616, L0.1617, L0.1618, L0.1619, L0.1625, L0.1627, L0.1631, L0.1632, L0.1633, L0.1634, L0.1635, L0.1639, L0.1643, L0.1647, L0.1651, L0.1655, L0.1657, L0.1663, L0.1668, L0.1677"
- - " Creating 93 files"
- - "**** Simulation run 452, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686857087331457301, 1686857701608860565]). 11 Input Files, 242mb total:"
- - "L0 "
- - "L0.1612[1686856473054054040,1686857053594594571] 1686935947.46s 48mb|-------------L0.1612-------------| "
- - "L0.1678[1686857053594594572,1686857216145945927] 1686935947.46s 13mb |L0.1678| "
- - "L0.1679[1686857216145945928,1686857679795016021] 1686935947.46s 38mb |---------L0.1679----------| "
- - "L0.1680[1686857679795016022,1686857724225846697] 1686935947.46s 4mb |L0.1680| "
- - "L0.1681[1686857724225846698,1686857924405405380] 1686935947.46s 17mb |-L0.1681--| "
- - "L0.1682[1686857924405405381,1686857959237837817] 1686935947.46s 3mb |L0.1682|"
- - "L1 "
- - "L1.1610[1686856473054054037,1686857053594594571] 1686932677.39s 46mb|-------------L1.1610-------------| "
- - "L1.1611[1686857053594594572,1686857216145945927] 1686932677.39s 13mb |L1.1611| "
- - "L1.1444[1686857216145945928,1686857724225846697] 1686932677.39s 41mb |----------L1.1444-----------| "
- - "L1.1621[1686857724225846698,1686857924405405380] 1686932677.39s 16mb |-L1.1621--| "
- - "L1.1622[1686857924405405381,1686857959237837817] 1686932677.39s 3mb |L1.1622|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 242mb total:"
- - "L1 "
- - "L1.?[1686856473054054037,1686857087331457301] 1686935947.46s 100mb|---------------L1.?----------------| "
- - "L1.?[1686857087331457302,1686857701608860565] 1686935947.46s 100mb |---------------L1.?----------------| "
- - "L1.?[1686857701608860566,1686857959237837817] 1686935947.46s 42mb |----L1.?-----| "
- - "Committing partition 1:"
- - " Soft Deleting 11 files: L1.1444, L1.1610, L1.1611, L0.1612, L1.1621, L1.1622, L0.1678, L0.1679, L0.1680, L0.1681, L0.1682"
- - " Creating 3 files"
- - "**** Simulation run 453, type=split(ReduceOverlap)(split_times=[1686857087331457301]). 1 Input Files, 2mb total:"
- - "L0, all files 2mb "
- - "L0.1724[1686857053594594572,1686857216145945927] 1686936871.55s|----------------------------------------L0.1724-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 2mb total:"
- - "L0 "
- - "L0.?[1686857053594594572,1686857087331457301] 1686936871.55s 362kb|------L0.?------| "
- - "L0.?[1686857087331457302,1686857216145945927] 1686936871.55s 1mb |--------------------------------L0.?---------------------------------| "
- - "**** Simulation run 454, type=split(ReduceOverlap)(split_times=[1686857701608860565]). 1 Input Files, 5mb total:"
- - "L0, all files 5mb "
- - "L0.1725[1686857216145945928,1686857724225846697] 1686936871.55s|----------------------------------------L0.1725-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 5mb total:"
- - "L0 "
- - "L0.?[1686857216145945928,1686857701608860565] 1686936871.55s 5mb|---------------------------------------L0.?----------------------------------------| "
- - "L0.?[1686857701608860566,1686857724225846697] 1686936871.55s 243kb |L0.?|"
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L0.1724, L0.1725"
- - " Creating 4 files"
- - "**** Simulation run 455, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686858566228295774, 1686859173218753730]). 9 Input Files, 269mb total:"
- - "L0 "
- - "L0.1683[1686857959237837818,1686858702329729707] 1686935947.46s 62mb|----------------L0.1683----------------| "
- - "L0.1684[1686858702329729708,1686858795216216189] 1686935947.46s 8mb |L0.1684| "
- - "L0.1620[1686858795216216190,1686858886535978002] 1686935947.46s 8mb |L0.1620| "
- - "L0.1685[1686858886535978003,1686858975397639357] 1686935947.46s 7mb |L0.1685| "
- - "L0.1686[1686858975397639358,1686859499000000000] 1686935947.46s 43mb |---------L0.1686----------| "
- - "L1 "
- - "L1.1450[1686857959237837818,1686858702329729707] 1686932677.39s 59mb|----------------L1.1450----------------| "
- - "L1.1623[1686858702329729708,1686858795216216189] 1686932677.39s 7mb |L1.1623| "
- - "L1.1624[1686858795216216190,1686858975397639357] 1686932677.39s 14mb |L1.1624| "
- - "L1.1479[1686858975397639358,1686859589566760129] 1686932677.39s 60mb |------------L1.1479------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 269mb total:"
- - "L1 "
- - "L1.?[1686857959237837818,1686858566228295774] 1686935947.46s 100mb|-------------L1.?--------------| "
- - "L1.?[1686858566228295775,1686859173218753730] 1686935947.46s 100mb |-------------L1.?--------------| "
- - "L1.?[1686859173218753731,1686859589566760129] 1686935947.46s 69mb |--------L1.?--------| "
- - "Committing partition 1:"
- - " Soft Deleting 9 files: L1.1450, L1.1479, L0.1620, L1.1623, L1.1624, L0.1683, L0.1684, L0.1685, L0.1686"
- - " Creating 3 files"
- - "**** Simulation run 456, type=split(ReduceOverlap)(split_times=[1686858566228295774]). 1 Input Files, 8mb total:"
- - "L0, all files 8mb "
- - "L0.1728[1686857959237837818,1686858702329729707] 1686936871.55s|----------------------------------------L0.1728-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 8mb total:"
- - "L0 "
- - "L0.?[1686857959237837818,1686858566228295774] 1686936871.55s 6mb|---------------------------------L0.?----------------------------------| "
- - "L0.?[1686858566228295775,1686858702329729707] 1686936871.55s 1mb |-----L0.?-----| "
- - "**** Simulation run 457, type=split(ReduceOverlap)(split_times=[1686859173218753730]). 1 Input Files, 6mb total:"
- - "L0, all files 6mb "
- - "L0.1732[1686859027432432417,1686859589566760129] 1686936871.55s|----------------------------------------L0.1732-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 6mb total:"
- - "L0 "
- - "L0.?[1686859027432432417,1686859173218753730] 1686936871.55s 2mb|--------L0.?---------| "
- - "L0.?[1686859173218753731,1686859589566760129] 1686936871.55s 4mb |------------------------------L0.?------------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L0.1728, L0.1732"
- - " Creating 4 files"
- - "**** Simulation run 458, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686841969006279730, 1686842559012559460]). 5 Input Files, 202mb total:"
- - "L0 "
- - "L0.1563[1686841379000000000,1686842540081081080] 1686936871.55s 99mb|---------------------------------------L0.1563---------------------------------------| "
- - "L0.1564[1686842540081081081,1686842547242379893] 1686936871.55s 628kb |L0.1564|"
- - "L0.1687[1686842547242379894,1686842571022320444] 1686936871.55s 2mb |L0.1687|"
- - "L1 "
- - "L1.1340[1686841379000000000,1686842540081081080] 1686932677.39s 97mb|---------------------------------------L1.1340---------------------------------------| "
- - "L1.1341[1686842540081081081,1686842571022320444] 1686932677.39s 3mb |L1.1341|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 202mb total:"
- - "L1 "
- - "L1.?[1686841379000000000,1686841969006279730] 1686936871.55s 100mb|-------------------L1.?-------------------| "
- - "L1.?[1686841969006279731,1686842559012559460] 1686936871.55s 100mb |-------------------L1.?-------------------| "
- - "L1.?[1686842559012559461,1686842571022320444] 1686936871.55s 2mb |L1.?|"
- - "Committing partition 1:"
- - " Soft Deleting 5 files: L1.1340, L1.1341, L0.1563, L0.1564, L0.1687"
- - " Creating 3 files"
- - "**** Simulation run 459, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686843161028605744, 1686843751034891043]). 5 Input Files, 202mb total:"
- - "L0 "
- - "L0.1688[1686842571022320445,1686843701162162160] 1686936871.55s 97mb|--------------------------------------L0.1688--------------------------------------| "
- - "L0.1566[1686843701162162161,1686843715484759786] 1686936871.55s 1mb |L0.1566|"
- - "L0.1689[1686843715484759787,1686843763044640888] 1686936871.55s 4mb |L0.1689|"
- - "L1 "
- - "L1.1346[1686842571022320445,1686843701162162160] 1686932677.39s 95mb|--------------------------------------L1.1346--------------------------------------| "
- - "L1.1347[1686843701162162161,1686843763044640888] 1686932677.39s 5mb |L1.1347|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 202mb total:"
- - "L1 "
- - "L1.?[1686842571022320445,1686843161028605744] 1686936871.55s 100mb|-------------------L1.?-------------------| "
- - "L1.?[1686843161028605745,1686843751034891043] 1686936871.55s 100mb |-------------------L1.?-------------------| "
- - "L1.?[1686843751034891044,1686843763044640888] 1686936871.55s 2mb |L1.?|"
- - "Committing partition 1:"
- - " Soft Deleting 5 files: L1.1346, L1.1347, L0.1566, L0.1688, L0.1689"
- - " Creating 3 files"
- - "**** Simulation run 460, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686844353050911219]). 2 Input Files, 186mb total:"
- - "L0 "
- - "L0.1690[1686843763044640889,1686844862243243240] 1686936871.55s 94mb|----------------------------------------L0.1690-----------------------------------------|"
- - "L1 "
- - "L1.1339[1686843763044640889,1686844862243243240] 1686932677.39s 92mb|----------------------------------------L1.1339-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 186mb total:"
- - "L1 "
- - "L1.?[1686843763044640889,1686844353050911219] 1686936871.55s 100mb|---------------------L1.?---------------------| "
- - "L1.?[1686844353050911220,1686844862243243240] 1686936871.55s 86mb |-----------------L1.?------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L1.1339, L0.1690"
- - " Creating 2 files"
- - "**** Simulation run 461, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686845452373250763, 1686846042503258285]). 5 Input Files, 202mb total:"
- - "L0 "
- - "L0.1567[1686844862243243241,1686846023324324320] 1686936871.55s 99mb|---------------------------------------L0.1567---------------------------------------| "
- - "L0.1568[1686846023324324321,1686846030485623133] 1686936871.55s 628kb |L0.1568|"
- - "L0.1691[1686846030485623134,1686846054770701976] 1686936871.55s 2mb |L0.1691|"
- - "L1 "
- - "L1.1363[1686844862243243241,1686846023324324320] 1686932677.39s 97mb|---------------------------------------L1.1363---------------------------------------| "
- - "L1.1364[1686846023324324321,1686846054770701976] 1686932677.39s 3mb |L1.1364|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 202mb total:"
- - "L1 "
- - "L1.?[1686844862243243241,1686845452373250763] 1686936871.55s 100mb|-------------------L1.?-------------------| "
- - "L1.?[1686845452373250764,1686846042503258285] 1686936871.55s 100mb |-------------------L1.?-------------------| "
- - "L1.?[1686846042503258286,1686846054770701976] 1686936871.55s 2mb |L1.?|"
- - "Committing partition 1:"
- - " Soft Deleting 5 files: L1.1363, L1.1364, L0.1567, L0.1568, L0.1691"
- - " Creating 3 files"
- - "**** Simulation run 462, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686846644900712284, 1686847235030722591]). 5 Input Files, 202mb total:"
- - "L0 "
- - "L0.1692[1686846054770701977,1686847184405405399] 1686936871.55s 97mb|--------------------------------------L0.1692--------------------------------------| "
- - "L0.1570[1686847184405405400,1686847198728003025] 1686936871.55s 1mb |L0.1570|"
- - "L0.1693[1686847198728003026,1686847247298160711] 1686936871.55s 4mb |L0.1693|"
- - "L1 "
- - "L1.1369[1686846054770701977,1686847184405405399] 1686932677.39s 95mb|--------------------------------------L1.1369--------------------------------------| "
- - "L1.1370[1686847184405405400,1686847247298160711] 1686932677.39s 5mb |L1.1370|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 202mb total:"
- - "L1 "
- - "L1.?[1686846054770701977,1686846644900712284] 1686936871.55s 100mb|-------------------L1.?-------------------| "
- - "L1.?[1686846644900712285,1686847235030722591] 1686936871.55s 100mb |-------------------L1.?-------------------| "
- - "L1.?[1686847235030722592,1686847247298160711] 1686936871.55s 2mb |L1.?|"
- - "Committing partition 1:"
- - " Soft Deleting 5 files: L1.1369, L1.1370, L0.1570, L0.1692, L0.1693"
- - " Creating 3 files"
- - "**** Simulation run 463, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686847837428156951]). 2 Input Files, 186mb total:"
- - "L0 "
- - "L0.1694[1686847247298160712,1686848345486486480] 1686936871.55s 94mb|----------------------------------------L0.1694-----------------------------------------|"
- - "L1 "
- - "L1.1362[1686847247298160712,1686848345486486480] 1686932677.39s 92mb|----------------------------------------L1.1362-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 186mb total:"
- - "L1 "
- - "L1.?[1686847247298160712,1686847837428156951] 1686936871.55s 100mb|---------------------L1.?---------------------| "
- - "L1.?[1686847837428156952,1686848345486486480] 1686936871.55s 86mb |-----------------L1.?------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L1.1362, L0.1694"
- - " Creating 2 files"
- - "**** Simulation run 464, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686848934953137039, 1686849524419787597]). 9 Input Files, 295mb total:"
- - "L0 "
- - "L0.1573[1686848345486486481,1686849216297297290] 1686936871.55s 76mb|------------------L0.1573------------------| "
- - "L0.1574[1686849216297297291,1686849491497710570] 1686936871.55s 24mb |--L0.1574---| "
- - "L0.1695[1686849491497710571,1686849506567567560] 1686936871.55s 1mb |L0.1695| "
- - "L0.1696[1686849506567567561,1686849559289331160] 1686936871.55s 5mb |L0.1696| "
- - "L0.1697[1686849559289331161,1686850087108108099] 1686936871.55s 46mb |---------L0.1697---------| "
- - "L1 "
- - "L1.1571[1686848345486486481,1686849216297297290] 1686932677.39s 72mb|------------------L1.1571------------------| "
- - "L1.1572[1686849216297297291,1686849506567567560] 1686932677.39s 24mb |--L1.1572---| "
- - "L1.1387[1686849506567567561,1686849559289331160] 1686932677.39s 4mb |L1.1387| "
- - "L1.1577[1686849559289331161,1686850087108108099] 1686932677.39s 43mb |---------L1.1577---------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 295mb total:"
- - "L1 "
- - "L1.?[1686848345486486481,1686848934953137039] 1686936871.55s 100mb|------------L1.?------------| "
- - "L1.?[1686848934953137040,1686849524419787597] 1686936871.55s 100mb |------------L1.?------------| "
- - "L1.?[1686849524419787598,1686850087108108099] 1686936871.55s 95mb |-----------L1.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 9 files: L1.1387, L1.1571, L1.1572, L0.1573, L0.1574, L1.1577, L0.1695, L0.1696, L0.1697"
- - " Creating 3 files"
- - "**** Simulation run 465, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686850880662583047, 1686851674217057994]). 9 Input Files, 219mb total:"
- - "L0 "
- - "L0.1576[1686850087108108100,1686850637508934659] 1686936871.55s 48mb|---------L0.1576----------| "
- - "L0.1698[1686850637508934660,1686850667648648639] 1686936871.55s 3mb |L0.1698| "
- - "L0.1699[1686850667648648640,1686850773092175839] 1686936871.55s 9mb |L0.1699| "
- - "L0.1700[1686850773092175840,1686850957918918908] 1686936871.55s 16mb |L0.1700| "
- - "L0.1580[1686850957918918909,1686850957918918910] 1686936871.55s 1b |L0.1580| "
- - "L1 "
- - "L1.1578[1686850087108108100,1686850667648648639] 1686932677.39s 48mb|----------L1.1578-----------| "
- - "L1.1393[1686850667648648640,1686850773092175839] 1686932677.39s 9mb |L1.1393| "
- - "L1.1581[1686850773092175840,1686850957918918908] 1686932677.39s 15mb |L1.1581| "
- - "L1.1582[1686850957918918909,1686851828729729717] 1686932677.39s 72mb |-----------------L1.1582------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 219mb total:"
- - "L1 "
- - "L1.?[1686850087108108100,1686850880662583047] 1686936871.55s 100mb|-----------------L1.?------------------| "
- - "L1.?[1686850880662583048,1686851674217057994] 1686936871.55s 100mb |-----------------L1.?------------------| "
- - "L1.?[1686851674217057995,1686851828729729717] 1686936871.55s 19mb |L1.?-| "
- - "Committing partition 1:"
- - " Soft Deleting 9 files: L1.1393, L0.1576, L1.1578, L0.1580, L1.1581, L1.1582, L0.1698, L0.1699, L0.1700"
- - " Creating 3 files"
- - "**** Simulation run 466, type=split(ReduceOverlap)(split_times=[1686851674217057994]). 1 Input Files, 88mb total:"
- - "L0, all files 88mb "
- - "L0.1584[1686850957918918911,1686851828729729717] 1686936871.55s|----------------------------------------L0.1584-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 88mb total:"
- - "L0 "
- - "L0.?[1686850957918918911,1686851674217057994] 1686936871.55s 73mb|----------------------------------L0.?----------------------------------| "
- - "L0.?[1686851674217057995,1686851828729729717] 1686936871.55s 16mb |----L0.?-----| "
- - "Committing partition 1:"
- - " Soft Deleting 1 files: L0.1584"
- - " Creating 2 files"
- - "**** Simulation run 467, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686851661119027532, 1686852441575472016]). 8 Input Files, 233mb total:"
- - "L0 "
- - "L0.1807[1686850957918918911,1686851674217057994] 1686936871.55s 73mb |-------------L0.1807-------------| "
- - "L0.1808[1686851674217057995,1686851828729729717] 1686936871.55s 16mb |L0.1808| "
- - "L0.1701[1686851828729729718,1686851828729729720] 1686936871.55s 0b |L0.1701| "
- - "L0.1702[1686851828729729721,1686851943085513215] 1686936871.55s 12mb |L0.1702| "
- - "L1 "
- - "L1.1805[1686850880662583048,1686851674217057994] 1686936871.55s 100mb|---------------L1.1805---------------| "
- - "L1.1806[1686851674217057995,1686851828729729717] 1686936871.55s 19mb |L1.1806| "
- - "L1.1583[1686851828729729718,1686851828729729720] 1686932677.39s 1b |L1.1583| "
- - "L1.1586[1686851828729729721,1686852699540540526] 1686931893.7s 14mb |-----------------L1.1586-----------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 233mb total:"
- - "L1 "
- - "L1.?[1686850880662583048,1686851661119027532] 1686936871.55s 100mb|----------------L1.?----------------| "
- - "L1.?[1686851661119027533,1686852441575472016] 1686936871.55s 100mb |----------------L1.?----------------| "
- - "L1.?[1686852441575472017,1686852699540540526] 1686936871.55s 33mb |---L1.?---| "
- - "Committing partition 1:"
- - " Soft Deleting 8 files: L1.1583, L1.1586, L0.1701, L0.1702, L1.1805, L1.1806, L0.1807, L0.1808"
- - " Creating 3 files"
- - "**** Simulation run 468, type=split(ReduceOverlap)(split_times=[1686852441575472016]). 1 Input Files, 77mb total:"
- - "L0, all files 77mb "
- - "L0.1588[1686851943085513216,1686852699540540526] 1686936871.55s|----------------------------------------L0.1588-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 77mb total:"
- - "L0 "
- - "L0.?[1686851943085513216,1686852441575472016] 1686936871.55s 51mb|--------------------------L0.?---------------------------| "
- - "L0.?[1686852441575472017,1686852699540540526] 1686936871.55s 26mb |------------L0.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 1 files: L0.1588"
- - " Creating 2 files"
- - "**** Simulation run 469, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686852290761155116, 1686852920403282699]). 8 Input Files, 292mb total:"
- - "L0 "
- - "L0.1812[1686851943085513216,1686852441575472016] 1686936871.55s 51mb |-------L0.1812--------| "
- - "L0.1813[1686852441575472017,1686852699540540526] 1686936871.55s 26mb |-L0.1813--| "
- - "L0.1703[1686852699540540527,1686852757594594584] 1686936871.55s 6mb |L0.1703| "
- - "L0.1704[1686852757594594585,1686852928252107519] 1686936871.55s 17mb |L0.1704| "
- - "L1 "
- - "L1.1810[1686851661119027533,1686852441575472016] 1686936871.55s 100mb|--------------L1.1810---------------| "
- - "L1.1811[1686852441575472017,1686852699540540526] 1686936871.55s 33mb |-L1.1811--| "
- - "L1.1587[1686852699540540527,1686852757594594584] 1686931893.7s 927kb |L1.1587| "
- - "L1.1409[1686852757594594585,1686853500686486475] 1686932677.39s 58mb |-------------L1.1409--------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 292mb total:"
- - "L1 "
- - "L1.?[1686851661119027533,1686852290761155116] 1686936871.55s 100mb|------------L1.?------------| "
- - "L1.?[1686852290761155117,1686852920403282699] 1686936871.55s 100mb |------------L1.?------------| "
- - "L1.?[1686852920403282700,1686853500686486475] 1686936871.55s 92mb |-----------L1.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 8 files: L1.1409, L1.1587, L0.1703, L0.1704, L1.1810, L1.1811, L0.1812, L0.1813"
- - " Creating 3 files"
- - "**** Simulation run 470, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686853405911193904, 1686853891419105108]). 9 Input Files, 273mb total:"
- - "L0 "
- - "L0.1705[1686852928252107520,1686853500686486475] 1686936871.55s 58mb|--------------L0.1705---------------| "
- - "L0.1706[1686853500686486476,1686853570351351335] 1686936871.55s 7mb |L0.1706| "
- - "L0.1591[1686853570351351336,1686853686459459447] 1686936871.55s 12mb |L0.1591| "
- - "L0.1707[1686853686459459448,1686854034336087198] 1686936871.55s 28mb |-------L0.1707-------| "
- - "L0.1708[1686854034336087199,1686854243778378365] 1686936871.55s 17mb |--L0.1708---| "
- - "L1 "
- - "L1.1816[1686852920403282700,1686853500686486475] 1686936871.55s 92mb|---------------L1.1816---------------| "
- - "L1.1592[1686853500686486476,1686853570351351335] 1686932677.39s 5mb |L1.1592| "
- - "L1.1593[1686853570351351336,1686854034336087198] 1686932677.39s 36mb |-----------L1.1593-----------| "
- - "L1.1419[1686854034336087199,1686854243778378365] 1686932677.39s 16mb |--L1.1419---| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 273mb total:"
- - "L1 "
- - "L1.?[1686852920403282700,1686853405911193904] 1686936871.55s 100mb|-------------L1.?--------------| "
- - "L1.?[1686853405911193905,1686853891419105108] 1686936871.55s 100mb |-------------L1.?--------------| "
- - "L1.?[1686853891419105109,1686854243778378365] 1686936871.55s 73mb |--------L1.?---------| "
- - "Committing partition 1:"
- - " Soft Deleting 9 files: L1.1419, L0.1591, L1.1592, L1.1593, L0.1705, L0.1706, L0.1707, L0.1708, L1.1816"
- - " Creating 3 files"
- - "**** Simulation run 471, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686854840368603300, 1686855436958828234]). 15 Input Files, 249mb total:"
- - "L0 "
- - "L0.1709[1686854243778378366,1686854441162162144] 1686936871.55s 16mb|-L0.1709-| "
- - "L0.1595[1686854441162162145,1686854918102788682] 1686936871.55s 39mb |---------L0.1595----------| "
- - "L0.1710[1686854819000000001,1686854986870270255] 1686936871.55s 2mb |L0.1710-| "
- - "L0.1713[1686854918102788683,1686854986870270255] 1686936871.55s 6mb |L0.1713| "
- - "L0.1711[1686854986870270256,1686855311077579811] 1686936871.55s 4mb |-----L0.1711-----| "
- - "L0.1714[1686854986870270256,1686855311077579811] 1686936871.55s 26mb |-----L0.1714-----| "
- - "L0.1712[1686855311077579812,1686855311972972953] 1686936871.55s 12kb |L0.1712| "
- - "L0.1715[1686855311077579812,1686855311972972953] 1686936871.55s 74kb |L0.1715| "
- - "L0.1716[1686855311972972954,1686855729962162145] 1686936871.55s 34mb |--------L0.1716--------| "
- - "L0.1718[1686855311972972954,1686855729962162145] 1686936871.55s 6mb |--------L0.1718--------| "
- - "L1 "
- - "L1.1596[1686854243778378366,1686854441162162144] 1686932677.39s 15mb|-L1.1596-| "
- - "L1.1597[1686854441162162145,1686854986870270255] 1686932677.39s 43mb |------------L1.1597------------| "
- - "L1.1421[1686854986870270256,1686855311077579811] 1686932677.39s 25mb |-----L1.1421-----| "
- - "L1.1602[1686855311077579812,1686855311972972953] 1686932677.39s 72kb |L1.1602| "
- - "L1.1603[1686855311972972954,1686855729962162145] 1686932677.39s 33mb |--------L1.1603--------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 249mb total:"
- - "L1 "
- - "L1.?[1686854243778378366,1686854840368603300] 1686936871.55s 100mb|---------------L1.?---------------| "
- - "L1.?[1686854840368603301,1686855436958828234] 1686936871.55s 100mb |---------------L1.?---------------| "
- - "L1.?[1686855436958828235,1686855729962162145] 1686936871.55s 49mb |-----L1.?------| "
- - "Committing partition 1:"
- - " Soft Deleting 15 files: L1.1421, L0.1595, L1.1596, L1.1597, L1.1602, L1.1603, L0.1709, L0.1710, L0.1711, L0.1712, L0.1713, L0.1714, L0.1715, L0.1716, L0.1718"
- - " Creating 3 files"
- - "**** Simulation run 472, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686856308052485466, 1686856886142808786]). 13 Input Files, 235mb total:"
- - "L0 "
- - "L0.1719[1686855729962162146,1686856142243243231] 1686936871.55s 5mb|---------L0.1719---------| "
- - "L0.1717[1686855729962162146,1686856149746117916] 1686936871.55s 34mb|---------L0.1717---------| "
- - "L0.1606[1686856142243243232,1686856182783783762] 1686936871.55s 549kb |L0.1606| "
- - "L0.1608[1686856149746117917,1686856182783783762] 1686936871.55s 3mb |L0.1608| "
- - "L0.1722[1686856182783783763,1686856473054054036] 1686936871.55s 4mb |-----L0.1722-----| "
- - "L0.1720[1686856182783783763,1686856473054054036] 1686936871.55s 24mb |-----L0.1720-----| "
- - "L0.1723[1686856473054054037,1686856473054054039] 1686936871.55s 1b |L0.1723| "
- - "L0.1721[1686856473054054037,1686856473054054039] 1686936871.55s 1b |L0.1721| "
- - "L0.1614[1686856473054054040,1686857053594594571] 1686936871.55s 6mb |--------------L0.1614---------------| "
- - "L0.1774[1686857053594594572,1686857087331457301] 1686936871.55s 362kb |L0.1774|"
- - "L1 "
- - "L1.1604[1686855729962162146,1686856182783783762] 1686932677.39s 35mb|----------L1.1604-----------| "
- - "L1.1605[1686856182783783763,1686856473054054036] 1686932677.39s 23mb |-----L1.1605-----| "
- - "L1.1771[1686856473054054037,1686857087331457301] 1686935947.46s 100mb |---------------L1.1771----------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 235mb total:"
- - "L1 "
- - "L1.?[1686855729962162146,1686856308052485466] 1686936871.55s 100mb|----------------L1.?----------------| "
- - "L1.?[1686856308052485467,1686856886142808786] 1686936871.55s 100mb |----------------L1.?----------------| "
- - "L1.?[1686856886142808787,1686857087331457301] 1686936871.55s 35mb |---L1.?----| "
- - "Committing partition 1:"
- - " Soft Deleting 13 files: L1.1604, L1.1605, L0.1606, L0.1608, L0.1614, L0.1717, L0.1719, L0.1720, L0.1721, L0.1722, L0.1723, L1.1771, L0.1774"
- - " Creating 3 files"
- - "**** Simulation run 473, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686857661762616884, 1686858236193776466]). 9 Input Files, 257mb total:"
- - "L0 "
- - "L0.1775[1686857087331457302,1686857216145945927] 1686936871.55s 1mb|L0.1775| "
- - "L0.1776[1686857216145945928,1686857701608860565] 1686936871.55s 5mb |----------L0.1776----------| "
- - "L0.1777[1686857701608860566,1686857724225846697] 1686936871.55s 243kb |L0.1777| "
- - "L0.1726[1686857724225846698,1686857924405405380] 1686936871.55s 2mb |-L0.1726--| "
- - "L0.1727[1686857924405405381,1686857959237837817] 1686936871.55s 374kb |L0.1727| "
- - "L0.1781[1686857959237837818,1686858566228295774] 1686936871.55s 6mb |-------------L0.1781--------------| "
- - "L1 "
- - "L1.1772[1686857087331457302,1686857701608860565] 1686935947.46s 100mb|--------------L1.1772--------------| "
- - "L1.1773[1686857701608860566,1686857959237837817] 1686935947.46s 42mb |---L1.1773---| "
- - "L1.1778[1686857959237837818,1686858566228295774] 1686935947.46s 100mb |-------------L1.1778--------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 257mb total:"
- - "L1 "
- - "L1.?[1686857087331457302,1686857661762616884] 1686936871.55s 100mb|--------------L1.?--------------| "
- - "L1.?[1686857661762616885,1686858236193776466] 1686936871.55s 100mb |--------------L1.?--------------| "
- - "L1.?[1686858236193776467,1686858566228295774] 1686936871.55s 57mb |-------L1.?-------| "
- - "Committing partition 1:"
- - " Soft Deleting 9 files: L0.1726, L0.1727, L1.1772, L1.1773, L0.1775, L0.1776, L0.1777, L1.1778, L0.1781"
- - " Creating 3 files"
- - "**** Simulation run 474, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686859138657133582, 1686859711085971389]). 19 Input Files, 286mb total:"
- - "L0 "
- - "L0.1782[1686858566228295775,1686858702329729707] 1686936871.55s 1mb|L0.1782| "
- - "L0.1729[1686858702329729708,1686858795216216189] 1686936871.55s 998kb |L0.1729| "
- - "L0.1730[1686858795216216190,1686858975397639357] 1686936871.55s 2mb |L0.1730| "
- - "L0.1731[1686858975397639358,1686859027432432416] 1686936871.55s 559kb |L0.1731| "
- - "L0.1783[1686859027432432417,1686859173218753730] 1686936871.55s 2mb |L0.1783| "
- - "L0.1784[1686859173218753731,1686859589566760129] 1686936871.55s 4mb |------L0.1784-------| "
- - "L0.1734[1686859499000000001,1686859589566760129] 1686936871.55s 300kb |L0.1734| "
- - "L0.1735[1686859589566760130,1686859666027026998] 1686936871.55s 253kb |L0.1735| "
- - "L0.1733[1686859589566760130,1686859666027026998] 1686936871.55s 821kb |L0.1733| "
- - "L0.1626[1686859666027026999,1686859666027027010] 1686936871.55s 1b |L0.1626| "
- - "L0.1628[1686859666027026999,1686859666027027010] 1686936871.55s 1b |L0.1628| "
- - "L0.1736[1686859666027027011,1686860002800686531] 1686936871.55s 28mb |----L0.1736-----| "
- - "L0.1739[1686860002800686532,1686860203735880900] 1686936871.55s 827kb |-L0.1739-| "
- - "L0.1737[1686860002800686532,1686860203735880900] 1686936871.55s 17mb |-L0.1737-| "
- - "L1 "
- - "L1.1779[1686858566228295775,1686859173218753730] 1686935947.46s 100mb|------------L1.1779------------| "
- - "L1.1780[1686859173218753731,1686859589566760129] 1686935947.46s 69mb |------L1.1780-------| "
- - "L1.1629[1686859589566760130,1686859666027026998] 1686932677.39s 7mb |L1.1629| "
- - "L1.1630[1686859666027026999,1686860002800686531] 1686932677.39s 33mb |----L1.1630-----| "
- - "L1.1485[1686860002800686532,1686860203735880900] 1686932677.39s 20mb |-L1.1485-| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 286mb total:"
- - "L1 "
- - "L1.?[1686858566228295775,1686859138657133582] 1686936871.55s 100mb|------------L1.?-------------| "
- - "L1.?[1686859138657133583,1686859711085971389] 1686936871.55s 100mb |------------L1.?-------------| "
- - "L1.?[1686859711085971390,1686860203735880900] 1686936871.55s 86mb |----------L1.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 19 files: L1.1485, L0.1626, L0.1628, L1.1629, L1.1630, L0.1729, L0.1730, L0.1731, L0.1733, L0.1734, L0.1735, L0.1736, L0.1737, L0.1739, L1.1779, L1.1780, L0.1782, L0.1783, L0.1784"
- - " Creating 3 files"
- - "**** Simulation run 475, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686860851476331520, 1686861499216782139]). 15 Input Files, 288mb total:"
- - "L0 "
- - "L0.1738[1686860203735880901,1686860536837837807] 1686936871.55s 28mb|---L0.1738----| "
- - "L0.1740[1686860203735880901,1686860536837837807] 1686936871.55s 1mb|---L0.1740----| "
- - "L0.1741[1686860536837837808,1686860817905001671] 1686936871.55s 1mb |--L0.1741--| "
- - "L0.1744[1686860536837837808,1686860817905001671] 1686936871.55s 23mb |--L0.1744--| "
- - "L0.1742[1686860817905001672,1686861030203733704] 1686936871.55s 874kb |L0.1742-| "
- - "L0.1745[1686860817905001672,1686860866085039343] 1686936871.55s 4mb |L0.1745| "
- - "L0.1746[1686860866085039344,1686861030203733704] 1686936871.55s 14mb |L0.1746| "
- - "L0.1743[1686861030203733705,1686861407648648616] 1686936871.55s 2mb |----L0.1743-----| "
- - "L0.1747[1686861030203733705,1686861407648648616] 1686936871.55s 31mb |----L0.1747-----| "
- - "L0.1748[1686861407648648617,1686862070968521403] 1686936871.55s 3mb |-----------L0.1748-----------| "
- - "L1 "
- - "L1.1637[1686860203735880901,1686860536837837807] 1686932677.39s 32mb|---L1.1637----| "
- - "L1.1638[1686860536837837808,1686860817905001671] 1686932677.39s 27mb |--L1.1638--| "
- - "L1.1487[1686860817905001672,1686861030203733704] 1686932677.39s 21mb |L1.1487-| "
- - "L1.1641[1686861030203733705,1686861407648648616] 1686932677.39s 36mb |----L1.1641-----| "
- - "L1.1642[1686861407648648617,1686862070968521403] 1686932677.39s 64mb |-----------L1.1642-----------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 288mb total:"
- - "L1 "
- - "L1.?[1686860203735880901,1686860851476331520] 1686936871.55s 100mb|------------L1.?-------------| "
- - "L1.?[1686860851476331521,1686861499216782139] 1686936871.55s 100mb |------------L1.?-------------| "
- - "L1.?[1686861499216782140,1686862070968521403] 1686936871.55s 88mb |----------L1.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 15 files: L1.1487, L1.1637, L1.1638, L1.1641, L1.1642, L0.1738, L0.1740, L0.1741, L0.1742, L0.1743, L0.1744, L0.1745, L0.1746, L0.1747, L0.1748"
- - " Creating 3 files"
- - "**** Simulation run 476, type=split(ReduceOverlap)(split_times=[1686861499216782139]). 1 Input Files, 55mb total:"
- - "L0, all files 55mb "
- - "L0.1640[1686861407648648617,1686862066143051675] 1686936871.55s|----------------------------------------L0.1640-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 55mb total:"
- - "L0 "
- - "L0.?[1686861407648648617,1686861499216782139] 1686936871.55s 8mb|---L0.?---| "
- - "L0.?[1686861499216782140,1686862066143051675] 1686936871.55s 47mb |-----------------------------------L0.?------------------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 1 files: L0.1640"
- - " Creating 2 files"
- - "**** Simulation run 477, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686861358211972492, 1686861864947613463]). 8 Input Files, 282mb total:"
- - "L0 "
- - "L0.1835[1686861407648648617,1686861499216782139] 1686936871.55s 8mb |L0.1835| "
- - "L0.1836[1686861499216782140,1686862066143051675] 1686936871.55s 47mb |-------------L0.1836-------------| "
- - "L0.1750[1686862066143051676,1686862070968521403] 1686936871.55s 412kb |L0.1750| "
- - "L0.1749[1686862070968521404,1686862278459459425] 1686936871.55s 854kb |--L0.1749--| "
- - "L0.1751[1686862070968521404,1686862278459459425] 1686936871.55s 17mb |--L0.1751--| "
- - "L1 "
- - "L1.1833[1686860851476331521,1686861499216782139] 1686936871.55s 100mb|---------------L1.1833----------------| "
- - "L1.1834[1686861499216782140,1686862070968521403] 1686936871.55s 88mb |-------------L1.1834--------------| "
- - "L1.1645[1686862070968521404,1686862278459459425] 1686932677.39s 20mb |--L1.1645--| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 282mb total:"
- - "L1 "
- - "L1.?[1686860851476331521,1686861358211972492] 1686936871.55s 100mb|------------L1.?-------------| "
- - "L1.?[1686861358211972493,1686861864947613463] 1686936871.55s 100mb |------------L1.?-------------| "
- - "L1.?[1686861864947613464,1686862278459459425] 1686936871.55s 82mb |----------L1.?----------| "
- - "Committing partition 1:"
- - " Soft Deleting 8 files: L1.1645, L0.1749, L0.1750, L0.1751, L1.1833, L1.1834, L0.1835, L0.1836"
- - " Creating 3 files"
- - "**** Simulation run 478, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686862801147318435]). 6 Input Files, 167mb total:"
- - "L0 "
- - "L0.1636[1686862278459459426,1686862975108108077] 1686936871.55s 3mb|-------------------------------L0.1636--------------------------------| "
- - "L0.1644[1686862278459459426,1686862975108108077] 1686936871.55s 58mb|-------------------------------L0.1644--------------------------------| "
- - "L0.1752[1686862975108108078,1686863111733309101] 1686936871.55s 17mb |--L0.1752---| "
- - "L0.1753[1686863111733309102,1686863149270270234] 1686936871.55s 5mb |L0.1753|"
- - "L1 "
- - "L1.1646[1686862278459459426,1686863111733309101] 1686932677.39s 80mb|--------------------------------------L1.1646---------------------------------------| "
- - "L1.1649[1686863111733309102,1686863149270270234] 1686932677.39s 4mb |L1.1649|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 167mb total:"
- - "L1 "
- - "L1.?[1686862278459459426,1686862801147318435] 1686936871.55s 100mb|------------------------L1.?------------------------| "
- - "L1.?[1686862801147318436,1686863149270270234] 1686936871.55s 67mb |--------------L1.?---------------| "
- - "Committing partition 1:"
- - " Soft Deleting 6 files: L0.1636, L0.1644, L1.1646, L1.1649, L0.1752, L0.1753"
- - " Creating 2 files"
- - "**** Simulation run 479, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686863696277675850, 1686864243285081465]). 8 Input Files, 275mb total:"
- - "L0 "
- - "L0.1648[1686863149270270235,1686863763854983772] 1686936871.55s 78mb|-------------L0.1648--------------| "
- - "L0.1754[1686863763854983773,1686863903972972940] 1686936871.55s 18mb |L0.1754| "
- - "L0.1755[1686863903972972941,1686864020081081043] 1686936871.55s 15mb |L0.1755| "
- - "L0.1652[1686864020081081044,1686864552601859466] 1686936871.55s 68mb |-----------L0.1652-----------| "
- - "L0.1756[1686864552601859467,1686864651607515459] 1686936871.55s 13mb |L0.1756|"
- - "L1 "
- - "L1.1650[1686863149270270235,1686863903972972940] 1686932677.39s 73mb|------------------L1.1650------------------| "
- - "L1.1653[1686863903972972941,1686864020081081043] 1686931893.7s 2mb |L1.1653| "
- - "L1.1654[1686864020081081044,1686864651607515459] 1686931893.7s 10mb |--------------L1.1654--------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 275mb total:"
- - "L1 "
- - "L1.?[1686863149270270235,1686863696277675850] 1686936871.55s 100mb|-------------L1.?-------------| "
- - "L1.?[1686863696277675851,1686864243285081465] 1686936871.55s 100mb |-------------L1.?-------------| "
- - "L1.?[1686864243285081466,1686864651607515459] 1686936871.55s 75mb |---------L1.?---------| "
- - "Committing partition 1:"
- - " Soft Deleting 8 files: L0.1648, L1.1650, L0.1652, L1.1653, L1.1654, L0.1754, L0.1755, L0.1756"
- - " Creating 3 files"
- - "**** Simulation run 480, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686865392302667070, 1686866132997818680]). 16 Input Files, 267mb total:"
- - "L0 "
- - "L0.1759[1686864651607515460,1686864832837837803] 1686936871.55s 6mb|L0.1759| "
- - "L0.1757[1686864651607515460,1686864832837837803] 1686936871.55s 23mb|L0.1757| "
- - "L0.1760[1686864832837837804,1686864890891891852] 1686936871.55s 2mb |L0.1760| "
- - "L0.1758[1686864832837837804,1686864890891891852] 1686936871.55s 7mb |L0.1758| "
- - "L0.1656[1686864890891891853,1686864890891891870] 1686936871.55s 1b |L0.1656| "
- - "L0.1658[1686864890891891853,1686864890891891870] 1686936871.55s 1b |L0.1658| "
- - "L0.1546[1686864890891891871,1686865715561243055] 1686936871.55s 100mb |--------------L0.1546--------------| "
- - "L0.1662[1686865715561243056,1686865761702702661] 1686936871.55s 6mb |L0.1662| "
- - "L0.1761[1686865761702702662,1686865761702702680] 1686936871.55s 0b |L0.1761| "
- - "L0.1762[1686865761702702681,1686866540230594239] 1686936871.55s 94mb |-------------L0.1762-------------| "
- - "L0.1548[1686866540230594240,1686866632513513490] 1686936871.55s 11mb |L0.1548|"
- - "L1 "
- - "L1.1325[1686864651607515460,1686864832837837803] 1686931893.7s 3mb|L1.1325| "
- - "L1.1659[1686864832837837804,1686864890891891852] 1686931893.7s 927kb |L1.1659| "
- - "L1.1660[1686864890891891853,1686865761702702661] 1686931893.7s 14mb |---------------L1.1660---------------| "
- - "L1.1661[1686865761702702662,1686865761702702680] 1686931893.7s 1b |L1.1661| "
- - "L1.761[1686865761702702681,1686866632513513490] 1686928854.57s 1mb |---------------L1.761----------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 267mb total:"
- - "L1 "
- - "L1.?[1686864651607515460,1686865392302667070] 1686936871.55s 100mb|-------------L1.?--------------| "
- - "L1.?[1686865392302667071,1686866132997818680] 1686936871.55s 100mb |-------------L1.?--------------| "
- - "L1.?[1686866132997818681,1686866632513513490] 1686936871.55s 67mb |--------L1.?--------| "
- - "Committing partition 1:"
- - " Soft Deleting 16 files: L1.761, L1.1325, L0.1546, L0.1548, L0.1656, L0.1658, L1.1659, L1.1660, L1.1661, L0.1662, L0.1757, L0.1758, L0.1759, L0.1760, L0.1761, L0.1762"
- - " Creating 3 files"
- - "**** Simulation run 481, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686867294213029769, 1686867955912546047]). 10 Input Files, 255mb total:"
- - "L0 "
- - "L0.1560[1686866632513513491,1686867299592048857] 1686936871.55s 100mb|-------------L0.1560-------------| "
- - "L0.1763[1686867299592048858,1686867503324324300] 1686936871.55s 31mb |L0.1763-| "
- - "L0.1764[1686867503324324301,1686867659000000000] 1686936871.55s 23mb |L0.1764| "
- - "L0.1765[1686867659000000001,1686867839000000000] 1686936871.55s 27mb |L0.1765| "
- - "L0.1766[1686867839000000001,1686867966670584223] 1686936871.55s 19mb |L0.1766| "
- - "L0.1562[1686867966670584224,1686868319000000000] 1686936871.55s 53mb |----L0.1562-----| "
- - "L1 "
- - "L1.762[1686866632513513491,1686867503324324300] 1686928854.57s 1mb|-------------------L1.762-------------------| "
- - "L1.763[1686867503324324301,1686867659000000000] 1686928854.57s 196kb |L1.763| "
- - "L1.73[1686867719000000000,1686867839000000000] 1686928854.57s 225kb |L1.73| "
- - "L1.75[1686867899000000000,1686868319000000000] 1686928854.57s 588kb |-------L1.75--------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 255mb total:"
- - "L1 "
- - "L1.?[1686866632513513491,1686867294213029769] 1686936871.55s 100mb|--------------L1.?---------------| "
- - "L1.?[1686867294213029770,1686867955912546047] 1686936871.55s 100mb |--------------L1.?---------------| "
- - "L1.?[1686867955912546048,1686868319000000000] 1686936871.55s 55mb |------L1.?-------| "
- - "Committing partition 1:"
- - " Soft Deleting 10 files: L1.73, L1.75, L1.762, L1.763, L0.1560, L0.1562, L0.1763, L0.1764, L0.1765, L0.1766"
- - " Creating 3 files"
- - "**** Simulation run 482, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686869101428723112, 1686869883857446223]). 9 Input Files, 230mb total:"
- - "L0 "
- - "L0.1664[1686868319000000001,1686868917918918910] 1686936871.55s 98mb|----------L0.1664----------| "
- - "L0.1665[1686868917918918911,1686868931238859876] 1686936871.55s 2mb |L0.1665| "
- - "L0.1767[1686868931238859877,1686869244945945920] 1686936871.55s 51mb |---L0.1767---| "
- - "L0.1768[1686869244945945921,1686869516837837819] 1686936871.55s 44mb |--L0.1768--| "
- - "L0.1669[1686869516837837820,1686869543477719751] 1686936871.55s 4mb |L0.1669| "
- - "L1 "
- - "L1.1666[1686868379000000000,1686868917918918910] 1686928854.57s 9mb |--------L1.1666---------| "
- - "L1.1667[1686868917918918911,1686869244945945920] 1686928854.57s 6mb |---L1.1667----| "
- - "L1.1670[1686869244945945921,1686869516837837819] 1686928854.57s 5mb |--L1.1670--| "
- - "L1.1671[1686869516837837820,1686870115756756730] 1686928854.57s 10mb |----------L1.1671-----------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 230mb total:"
- - "L1 "
- - "L1.?[1686868319000000001,1686869101428723112] 1686936871.55s 100mb|----------------L1.?-----------------| "
- - "L1.?[1686869101428723113,1686869883857446223] 1686936871.55s 100mb |----------------L1.?-----------------| "
- - "L1.?[1686869883857446224,1686870115756756730] 1686936871.55s 30mb |--L1.?---| "
- - "Committing partition 1:"
- - " Soft Deleting 9 files: L0.1664, L0.1665, L1.1666, L1.1667, L0.1669, L1.1670, L1.1671, L0.1767, L0.1768"
- - " Creating 3 files"
- - "**** Simulation run 483, type=split(ReduceOverlap)(split_times=[1686869883857446223]). 1 Input Files, 93mb total:"
- - "L0, all files 93mb "
- - "L0.1551[1686869543477719752,1686870115756756730] 1686936871.55s|----------------------------------------L0.1551-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 93mb total:"
- - "L0 "
- - "L0.?[1686869543477719752,1686869883857446223] 1686936871.55s 56mb|-----------------------L0.?------------------------| "
- - "L0.?[1686869883857446224,1686870115756756730] 1686936871.55s 38mb |---------------L0.?---------------| "
- - "Committing partition 1:"
- - " Soft Deleting 1 files: L0.1551"
- - " Creating 2 files"
- - "**** Simulation run 484, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686869556056907425, 1686870010685091737]). 4 Input Files, 223mb total:"
- - "L0 "
- - "L0.1854[1686869543477719752,1686869883857446223] 1686936871.55s 56mb |----------L0.1854-----------| "
- - "L0.1855[1686869883857446224,1686870115756756730] 1686936871.55s 38mb |-----L0.1855------| "
- - "L1 "
- - "L1.1852[1686869101428723113,1686869883857446223] 1686936871.55s 100mb|------------------------------L1.1852------------------------------| "
- - "L1.1853[1686869883857446224,1686870115756756730] 1686936871.55s 30mb |-----L1.1853------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 223mb total:"
- - "L1 "
- - "L1.?[1686869101428723113,1686869556056907425] 1686936871.55s 100mb|-----------------L1.?-----------------| "
- - "L1.?[1686869556056907426,1686870010685091737] 1686936871.55s 100mb |-----------------L1.?-----------------| "
- - "L1.?[1686870010685091738,1686870115756756730] 1686936871.55s 23mb |-L1.?--| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L1.1852, L1.1853, L0.1854, L0.1855"
- - " Creating 3 files"
- - "**** Simulation run 485, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686870726636810267, 1686871337516863803]). 6 Input Files, 285mb total:"
- - "L0 "
- - "L0.1552[1686870115756756731,1686870677571828433] 1686936871.55s 100mb|----------L0.1552----------| "
- - "L0.1672[1686870677571828434,1686870986567567540] 1686936871.55s 55mb |---L0.1672---| "
- - "L0.1673[1686870986567567541,1686871239386900135] 1686936871.55s 45mb |--L0.1673--| "
- - "L0.1554[1686871239386900136,1686871550514515284] 1686936871.55s 55mb |---L0.1554----| "
- - "L1 "
- - "L1.907[1686870115756756731,1686870986567567540] 1686928854.57s 15mb|------------------L1.907-------------------| "
- - "L1.1674[1686870986567567541,1686871857378378349] 1686928854.57s 15mb |-----------------L1.1674------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 285mb total:"
- - "L1 "
- - "L1.?[1686870115756756731,1686870726636810267] 1686936871.55s 100mb|------------L1.?-------------| "
- - "L1.?[1686870726636810268,1686871337516863803] 1686936871.55s 100mb |------------L1.?-------------| "
- - "L1.?[1686871337516863804,1686871857378378349] 1686936871.55s 85mb |----------L1.?----------| "
- - "Committing partition 1:"
- - " Soft Deleting 6 files: L1.907, L0.1552, L0.1554, L0.1672, L0.1673, L1.1674"
- - " Creating 3 files"
- - "**** Simulation run 486, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686871801134370324, 1686872264751876844]). 7 Input Files, 300mb total:"
- - "L0 "
- - "L0.1676[1686871550514515285,1686871857378378349] 1686936871.55s 55mb |-----L0.1676-----| "
- - "L0.1769[1686871857378378350,1686871857378378350] 1686936871.55s 0b |L0.1769| "
- - "L0.1770[1686871857378378351,1686872106056369133] 1686936871.55s 45mb |---L0.1770----| "
- - "L0.1556[1686872106056369134,1686872661598222981] 1686936871.55s 100mb |-------------L0.1556-------------| "
- - "L1 "
- - "L1.1861[1686871337516863804,1686871857378378349] 1686936871.55s 85mb|------------L1.1861------------| "
- - "L1.1675[1686871857378378350,1686871857378378350] 1686928854.57s 1b |L1.1675| "
- - "L1.909[1686871857378378351,1686872728189189160] 1686928854.57s 15mb |------------------------L1.909------------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 300mb total:"
- - "L1 "
- - "L1.?[1686871337516863804,1686871801134370324] 1686936871.55s 100mb|------------L1.?------------| "
- - "L1.?[1686871801134370325,1686872264751876844] 1686936871.55s 100mb |------------L1.?------------| "
- - "L1.?[1686872264751876845,1686872728189189160] 1686936871.55s 100mb |-----------L1.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 7 files: L1.909, L0.1556, L1.1675, L0.1676, L0.1769, L0.1770, L1.1861"
- - " Creating 3 files"
- - "**** Simulation run 487, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1686872735710689569, 1686873206669502293]). 5 Input Files, 283mb total:"
- - "L0 "
- - "L0.1559[1686873284631629745,1686873599000000000] 1686936871.55s 56mb |------L0.1559------| "
- - "L0.1558[1686872728189189161,1686873284631629744] 1686936871.55s 100mb |--------------L0.1558--------------| "
- - "L0.1557[1686872661598222982,1686872728189189160] 1686936871.55s 12mb |L0.1557| "
- - "L1 "
- - "L1.1864[1686872264751876845,1686872728189189160] 1686936871.55s 100mb|-----------L1.1864-----------| "
- - "L1.910[1686872728189189161,1686873599000000000] 1686928854.57s 15mb |-------------------------L1.910-------------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 283mb total:"
- - "L1 "
- - "L1.?[1686872264751876845,1686872735710689569] 1686936871.55s 100mb|------------L1.?-------------| "
- - "L1.?[1686872735710689570,1686873206669502293] 1686936871.55s 100mb |------------L1.?-------------| "
- - "L1.?[1686873206669502294,1686873599000000000] 1686936871.55s 83mb |----------L1.?----------| "
- - "Committing partition 1:"
- - " Soft Deleting 5 files: L1.910, L0.1557, L0.1558, L0.1559, L1.1864"
- - " Creating 3 files"
- - "**** Simulation run 488, type=split(ReduceOverlap)(split_times=[1686867839000000000]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1849[1686867294213029770,1686867955912546047] 1686936871.55s|----------------------------------------L1.1849-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686867294213029770,1686867839000000000] 1686936871.55s 82mb|----------------------------------L1.?----------------------------------| "
- - "L1.?[1686867839000000001,1686867955912546047] 1686936871.55s 18mb |----L1.?-----| "
- - "**** Simulation run 489, type=split(ReduceOverlap)(split_times=[1686863699000000000]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1843[1686863696277675851,1686864243285081465] 1686936871.55s|----------------------------------------L1.1843-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686863696277675851,1686863699000000000] 1686936871.55s 510kb|L1.?| "
- - "L1.?[1686863699000000001,1686864243285081465] 1686936871.55s 100mb|-----------------------------------------L1.?------------------------------------------| "
- - "**** Simulation run 490, type=split(ReduceOverlap)(split_times=[1686859499000000000]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1830[1686859138657133583,1686859711085971389] 1686936871.55s|----------------------------------------L1.1830-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686859138657133583,1686859499000000000] 1686936871.55s 63mb|-------------------------L1.?-------------------------| "
- - "L1.?[1686859499000000001,1686859711085971389] 1686936871.55s 37mb |-------------L1.?--------------| "
- - "**** Simulation run 491, type=split(ReduceOverlap)(split_times=[1686859019000000000]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1829[1686858566228295775,1686859138657133582] 1686936871.55s|----------------------------------------L1.1829-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686858566228295775,1686859019000000000] 1686936871.55s 79mb|--------------------------------L1.?---------------------------------| "
- - "L1.?[1686859019000000001,1686859138657133582] 1686936871.55s 21mb |------L1.?------| "
- - "**** Simulation run 492, type=split(ReduceOverlap)(split_times=[1686850559000000000]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1804[1686850087108108100,1686850880662583047] 1686936871.55s|----------------------------------------L1.1804----------------------------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686850087108108100,1686850559000000000] 1686936871.55s 59mb|-----------------------L1.?------------------------| "
- - "L1.?[1686850559000000001,1686850880662583047] 1686936871.55s 41mb |---------------L1.?---------------| "
- - "**** Simulation run 493, type=split(ReduceOverlap)(split_times=[1686849779000000000]). 1 Input Files, 95mb total:"
- - "L1, all files 95mb "
- - "L1.1803[1686849524419787598,1686850087108108099] 1686936871.55s|----------------------------------------L1.1803-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 95mb total:"
- - "L1 "
- - "L1.?[1686849524419787598,1686849779000000000] 1686936871.55s 43mb|-----------------L1.?-----------------| "
- - "L1.?[1686849779000000001,1686850087108108099] 1686936871.55s 52mb |---------------------L1.?----------------------| "
- - "**** Simulation run 494, type=split(ReduceOverlap)(split_times=[1686845579000000000]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1794[1686845452373250764,1686846042503258285] 1686936871.55s|----------------------------------------L1.1794-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686845452373250764,1686845579000000000] 1686936871.55s 21mb|------L1.?-------| "
- - "L1.?[1686845579000000001,1686846042503258285] 1686936871.55s 79mb |--------------------------------L1.?--------------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 7 files: L1.1794, L1.1803, L1.1804, L1.1829, L1.1830, L1.1843, L1.1849"
- - " Creating 14 files"
- - "**** Simulation run 495, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686842786390926609, 1686844193781853218]). 3 Input Files, 298mb total:"
- - "L1 "
- - "L1.1785[1686841379000000000,1686841969006279730] 1686936871.55s 100mb|-L1.1785--| "
- - "L1.1786[1686841969006279731,1686842559012559460] 1686936871.55s 100mb |-L1.1786--| "
- - "L2 "
- - "L2.2[1686841379000000000,1686845579000000000] 1686928811.43s 98mb|------------------------------------------L2.2------------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 298mb total:"
- - "L2 "
- - "L2.?[1686841379000000000,1686842786390926609] 1686936871.55s 100mb|------------L2.?------------| "
- - "L2.?[1686842786390926610,1686844193781853218] 1686936871.55s 100mb |------------L2.?------------| "
- - "L2.?[1686844193781853219,1686845579000000000] 1686936871.55s 98mb |-----------L2.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L2.2, L1.1785, L1.1786"
- - " Creating 3 files"
- - "**** Simulation run 496, type=split(ReduceOverlap)(split_times=[1686844193781853218]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1791[1686843763044640889,1686844353050911219] 1686936871.55s|----------------------------------------L1.1791-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686843763044640889,1686844193781853218] 1686936871.55s 73mb|-----------------------------L1.?------------------------------| "
- - "L1.?[1686844193781853219,1686844353050911219] 1686936871.55s 27mb |---------L1.?---------| "
- - "**** Simulation run 497, type=split(ReduceOverlap)(split_times=[1686842786390926609]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1788[1686842571022320445,1686843161028605744] 1686936871.55s|----------------------------------------L1.1788-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686842571022320445,1686842786390926609] 1686936871.55s 37mb|-------------L1.?-------------| "
- - "L1.?[1686842786390926610,1686843161028605744] 1686936871.55s 63mb |-------------------------L1.?--------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L1.1788, L1.1791"
- - " Creating 4 files"
- - "**** Simulation run 498, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686842394885830950]). 3 Input Files, 139mb total:"
- - "L1 "
- - "L1.1787[1686842559012559461,1686842571022320444] 1686936871.55s 2mb |L1.1787| "
- - "L1.1887[1686842571022320445,1686842786390926609] 1686936871.55s 37mb |--L1.1887--| "
- - "L2 "
- - "L2.1882[1686841379000000000,1686842786390926609] 1686936871.55s 100mb|----------------------------------------L2.1882-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 139mb total:"
- - "L2 "
- - "L2.?[1686841379000000000,1686842394885830950] 1686936871.55s 100mb|-----------------------------L2.?-----------------------------| "
- - "L2.?[1686842394885830951,1686842786390926609] 1686936871.55s 39mb |---------L2.?----------| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L1.1787, L2.1882, L1.1887"
- - " Creating 2 files"
- - "**** Simulation run 499, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686843316416264410, 1686843846441602210]). 4 Input Files, 266mb total:"
- - "L1 "
- - "L1.1888[1686842786390926610,1686843161028605744] 1686936871.55s 63mb|-------L1.1888-------| "
- - "L1.1789[1686843161028605745,1686843751034891043] 1686936871.55s 100mb |--------------L1.1789--------------| "
- - "L1.1790[1686843751034891044,1686843763044640888] 1686936871.55s 2mb |L1.1790| "
- - "L2 "
- - "L2.1883[1686842786390926610,1686844193781853218] 1686936871.55s 100mb|----------------------------------------L2.1883-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 266mb total:"
- - "L2 "
- - "L2.?[1686842786390926610,1686843316416264410] 1686936871.55s 100mb|-------------L2.?--------------| "
- - "L2.?[1686843316416264411,1686843846441602210] 1686936871.55s 100mb |-------------L2.?--------------| "
- - "L2.?[1686843846441602211,1686844193781853218] 1686936871.55s 66mb |--------L2.?--------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L1.1789, L1.1790, L2.1883, L1.1888"
- - " Creating 3 files"
- - "**** Simulation run 500, type=split(ReduceOverlap)(split_times=[1686843846441602210]). 1 Input Files, 73mb total:"
- - "L1, all files 73mb "
- - "L1.1885[1686843763044640889,1686844193781853218] 1686936871.55s|----------------------------------------L1.1885-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 73mb total:"
- - "L1 "
- - "L1.?[1686843763044640889,1686843846441602210] 1686936871.55s 14mb|-----L1.?------| "
- - "L1.?[1686843846441602211,1686844193781853218] 1686936871.55s 59mb |---------------------------------L1.?---------------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 1 files: L1.1885"
- - " Creating 2 files"
- - "**** Simulation run 501, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686843684225379958, 1686844052034495505]). 4 Input Files, 239mb total:"
- - "L1 "
- - "L1.1894[1686843763044640889,1686843846441602210] 1686936871.55s 14mb |L1.1894| "
- - "L1.1895[1686843846441602211,1686844193781853218] 1686936871.55s 59mb |-------------L1.1895-------------| "
- - "L2 "
- - "L2.1892[1686843316416264411,1686843846441602210] 1686936871.55s 100mb|----------------------L2.1892-----------------------| "
- - "L2.1893[1686843846441602211,1686844193781853218] 1686936871.55s 66mb |-------------L2.1893-------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 239mb total:"
- - "L2 "
- - "L2.?[1686843316416264411,1686843684225379958] 1686936871.55s 100mb|---------------L2.?----------------| "
- - "L2.?[1686843684225379959,1686844052034495505] 1686936871.55s 100mb |---------------L2.?----------------| "
- - "L2.?[1686844052034495506,1686844193781853218] 1686936871.55s 39mb |----L2.?----| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L2.1892, L2.1893, L1.1894, L1.1895"
- - " Creating 3 files"
- - "**** Simulation run 502, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686844848044941008, 1686845502308028797]). 3 Input Files, 212mb total:"
- - "L1 "
- - "L1.1886[1686844193781853219,1686844353050911219] 1686936871.55s 27mb|L1.1886-| "
- - "L1.1792[1686844353050911220,1686844862243243240] 1686936871.55s 86mb |------------L1.1792------------| "
- - "L2 "
- - "L2.1884[1686844193781853219,1686845579000000000] 1686936871.55s 98mb|----------------------------------------L2.1884-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 212mb total:"
- - "L2 "
- - "L2.?[1686844193781853219,1686844848044941008] 1686936871.55s 100mb|------------------L2.?------------------| "
- - "L2.?[1686844848044941009,1686845502308028797] 1686936871.55s 100mb |------------------L2.?------------------| "
- - "L2.?[1686845502308028798,1686845579000000000] 1686936871.55s 12mb |L2.?|"
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L1.1792, L2.1884, L1.1886"
- - " Creating 3 files"
- - "**** Simulation run 503, type=split(ReduceOverlap)(split_times=[1686845502308028797]). 1 Input Files, 21mb total:"
- - "L1, all files 21mb "
- - "L1.1880[1686845452373250764,1686845579000000000] 1686936871.55s|----------------------------------------L1.1880-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 21mb total:"
- - "L1 "
- - "L1.?[1686845452373250764,1686845502308028797] 1686936871.55s 8mb|--------------L1.?---------------| "
- - "L1.?[1686845502308028798,1686845579000000000] 1686936871.55s 13mb |------------------------L1.?------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 1 files: L1.1880"
- - " Creating 2 files"
- - "**** Simulation run 504, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686845161518308591, 1686845474991676173]). 5 Input Files, 233mb total:"
- - "L1 "
- - "L1.1793[1686844862243243241,1686845452373250763] 1686936871.55s 100mb |-------------------------------L1.1793--------------------------------| "
- - "L1.1902[1686845452373250764,1686845502308028797] 1686936871.55s 8mb |L1.1902| "
- - "L1.1903[1686845502308028798,1686845579000000000] 1686936871.55s 13mb |L1.1903| "
- - "L2 "
- - "L2.1900[1686844848044941009,1686845502308028797] 1686936871.55s 100mb|-----------------------------------L2.1900------------------------------------| "
- - "L2.1901[1686845502308028798,1686845579000000000] 1686936871.55s 12mb |L2.1901| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 233mb total:"
- - "L2 "
- - "L2.?[1686844848044941009,1686845161518308591] 1686936871.55s 100mb|----------------L2.?----------------| "
- - "L2.?[1686845161518308592,1686845474991676173] 1686936871.55s 100mb |----------------L2.?----------------| "
- - "L2.?[1686845474991676174,1686845579000000000] 1686936871.55s 33mb |---L2.?---| "
- - "Committing partition 1:"
- - " Soft Deleting 5 files: L1.1793, L2.1900, L2.1901, L1.1902, L1.1903"
- - " Creating 3 files"
- - "**** Simulation run 505, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686847090988653050, 1686848602977306099]). 4 Input Files, 278mb total:"
- - "L1 "
- - "L1.1881[1686845579000000001,1686846042503258285] 1686936871.55s 79mb|L1.1881| "
- - "L1.1795[1686846042503258286,1686846054770701976] 1686936871.55s 2mb |L1.1795| "
- - "L1.1796[1686846054770701977,1686846644900712284] 1686936871.55s 100mb |-L1.1796--| "
- - "L2 "
- - "L2.25[1686845639000000000,1686849779000000000] 1686928811.43s 97mb |----------------------------------------L2.25-----------------------------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 278mb total:"
- - "L2 "
- - "L2.?[1686845579000000001,1686847090988653050] 1686936871.55s 100mb|-------------L2.?-------------| "
- - "L2.?[1686847090988653051,1686848602977306099] 1686936871.55s 100mb |-------------L2.?-------------| "
- - "L2.?[1686848602977306100,1686849779000000000] 1686936871.55s 78mb |---------L2.?----------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L2.25, L1.1795, L1.1796, L1.1881"
- - " Creating 3 files"
- - "**** Simulation run 506, type=split(ReduceOverlap)(split_times=[1686848602977306099]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1801[1686848345486486481,1686848934953137039] 1686936871.55s|----------------------------------------L1.1801-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686848345486486481,1686848602977306099] 1686936871.55s 44mb|----------------L1.?-----------------| "
- - "L1.?[1686848602977306100,1686848934953137039] 1686936871.55s 56mb |----------------------L1.?----------------------| "
- - "**** Simulation run 507, type=split(ReduceOverlap)(split_times=[1686847090988653050]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1797[1686846644900712285,1686847235030722591] 1686936871.55s|----------------------------------------L1.1797-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686846644900712285,1686847090988653050] 1686936871.55s 76mb|-------------------------------L1.?-------------------------------| "
- - "L1.?[1686847090988653051,1686847235030722591] 1686936871.55s 24mb |-------L1.?--------| "
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L1.1797, L1.1801"
- - " Creating 4 files"
- - "**** Simulation run 508, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686846586992441776, 1686847594984883551]). 4 Input Files, 300mb total:"
- - "L1 "
- - "L1.1912[1686846644900712285,1686847090988653050] 1686936871.55s 76mb |--L1.1912--| "
- - "L1.1913[1686847090988653051,1686847235030722591] 1686936871.55s 24mb |L1.1913| "
- - "L2 "
- - "L2.1907[1686845579000000001,1686847090988653050] 1686936871.55s 100mb|------------------L2.1907------------------| "
- - "L2.1908[1686847090988653051,1686848602977306099] 1686936871.55s 100mb |-----------------L2.1908------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 300mb total:"
- - "L2 "
- - "L2.?[1686845579000000001,1686846586992441776] 1686936871.55s 100mb|------------L2.?------------| "
- - "L2.?[1686846586992441777,1686847594984883551] 1686936871.55s 100mb |------------L2.?------------| "
- - "L2.?[1686847594984883552,1686848602977306099] 1686936871.55s 100mb |-----------L2.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L2.1907, L2.1908, L1.1912, L1.1913"
- - " Creating 3 files"
- - "**** Simulation run 509, type=split(ReduceOverlap)(split_times=[1686847594984883551]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1799[1686847247298160712,1686847837428156951] 1686936871.55s|----------------------------------------L1.1799-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686847247298160712,1686847594984883551] 1686936871.55s 59mb|-----------------------L1.?------------------------| "
- - "L1.?[1686847594984883552,1686847837428156951] 1686936871.55s 41mb |---------------L1.?---------------| "
- - "Committing partition 1:"
- - " Soft Deleting 1 files: L1.1799"
- - " Creating 2 files"
- - "**** Simulation run 510, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686847213091270336]). 3 Input Files, 161mb total:"
- - "L1 "
- - "L1.1798[1686847235030722592,1686847247298160711] 1686936871.55s 2mb |L1.1798| "
- - "L1.1917[1686847247298160712,1686847594984883551] 1686936871.55s 59mb |-----------L1.1917-----------| "
- - "L2 "
- - "L2.1915[1686846586992441777,1686847594984883551] 1686936871.55s 100mb|----------------------------------------L2.1915-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 161mb total:"
- - "L2 "
- - "L2.?[1686846586992441777,1686847213091270336] 1686936871.55s 100mb|------------------------L2.?-------------------------| "
- - "L2.?[1686847213091270337,1686847594984883551] 1686936871.55s 61mb |--------------L2.?--------------| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L1.1798, L2.1915, L1.1917"
- - " Creating 2 files"
- - "**** Simulation run 511, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686847967133302591, 1686848339281721630]). 4 Input Files, 271mb total:"
- - "L1 "
- - "L1.1918[1686847594984883552,1686847837428156951] 1686936871.55s 41mb|------L1.1918------| "
- - "L1.1800[1686847837428156952,1686848345486486480] 1686936871.55s 86mb |------------------L1.1800------------------| "
- - "L1.1910[1686848345486486481,1686848602977306099] 1686936871.55s 44mb |------L1.1910-------| "
- - "L2 "
- - "L2.1916[1686847594984883552,1686848602977306099] 1686936871.55s 100mb|----------------------------------------L2.1916-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 271mb total:"
- - "L2 "
- - "L2.?[1686847594984883552,1686847967133302591] 1686936871.55s 100mb|-------------L2.?--------------| "
- - "L2.?[1686847967133302592,1686848339281721630] 1686936871.55s 100mb |-------------L2.?--------------| "
- - "L2.?[1686848339281721631,1686848602977306099] 1686936871.55s 71mb |--------L2.?---------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L1.1800, L1.1910, L2.1916, L1.1918"
- - " Creating 3 files"
- - "**** Simulation run 512, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686849027096194358, 1686849451215082616]). 4 Input Files, 277mb total:"
- - "L1 "
- - "L1.1911[1686848602977306100,1686848934953137039] 1686936871.55s 56mb|--------L1.1911--------| "
- - "L1.1802[1686848934953137040,1686849524419787597] 1686936871.55s 100mb |------------------L1.1802------------------| "
- - "L1.1878[1686849524419787598,1686849779000000000] 1686936871.55s 43mb |-----L1.1878-----| "
- - "L2 "
- - "L2.1909[1686848602977306100,1686849779000000000] 1686936871.55s 78mb|----------------------------------------L2.1909-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 277mb total:"
- - "L2 "
- - "L2.?[1686848602977306100,1686849027096194358] 1686936871.55s 100mb|-------------L2.?-------------| "
- - "L2.?[1686849027096194359,1686849451215082616] 1686936871.55s 100mb |-------------L2.?-------------| "
- - "L2.?[1686849451215082617,1686849779000000000] 1686936871.55s 77mb |---------L2.?----------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L1.1802, L1.1878, L2.1909, L1.1911"
- - " Creating 3 files"
- - "**** Simulation run 513, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686851639207134472, 1686853499414268943]). 5 Input Files, 271mb total:"
- - "L1 "
- - "L1.1879[1686849779000000001,1686850087108108099] 1686936871.55s 52mb|L1.1879| "
- - "L1.1876[1686850087108108100,1686850559000000000] 1686936871.55s 59mb |L1.1876| "
- - "L1.1877[1686850559000000001,1686850880662583047] 1686936871.55s 41mb |L1.1877| "
- - "L2 "
- - "L2.27[1686849839000000000,1686850559000000000] 1686928811.43s 20mb |--L2.27---| "
- - "L2.30[1686850619000000000,1686854819000000000] 1686928811.43s 98mb |----------------------------------L2.30----------------------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 271mb total:"
- - "L2 "
- - "L2.?[1686849779000000001,1686851639207134472] 1686936871.55s 100mb|-------------L2.?--------------| "
- - "L2.?[1686851639207134473,1686853499414268943] 1686936871.55s 100mb |-------------L2.?--------------| "
- - "L2.?[1686853499414268944,1686854819000000000] 1686936871.55s 71mb |--------L2.?---------| "
- - "Committing partition 1:"
- - " Soft Deleting 5 files: L2.27, L2.30, L1.1876, L1.1877, L1.1879"
- - " Creating 3 files"
- - "**** Simulation run 514, type=split(ReduceOverlap)(split_times=[1686853499414268943]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1818[1686853405911193905,1686853891419105108] 1686936871.55s|----------------------------------------L1.1818-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686853405911193905,1686853499414268943] 1686936871.55s 19mb|-----L1.?------| "
- - "L1.?[1686853499414268944,1686853891419105108] 1686936871.55s 81mb |---------------------------------L1.?---------------------------------| "
- - "**** Simulation run 515, type=split(ReduceOverlap)(split_times=[1686851639207134472]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1809[1686850880662583048,1686851661119027532] 1686936871.55s|----------------------------------------L1.1809-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686850880662583048,1686851639207134472] 1686936871.55s 97mb|----------------------------------------L1.?-----------------------------------------| "
- - "L1.?[1686851639207134473,1686851661119027532] 1686936871.55s 3mb |L1.?|"
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L1.1809, L1.1818"
- - " Creating 4 files"
- - "**** Simulation run 516, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686851019138093591, 1686852259276187181]). 4 Input Files, 300mb total:"
- - "L1 "
- - "L1.1932[1686850880662583048,1686851639207134472] 1686936871.55s 97mb |----L1.1932-----| "
- - "L1.1933[1686851639207134473,1686851661119027532] 1686936871.55s 3mb |L1.1933| "
- - "L2 "
- - "L2.1927[1686849779000000001,1686851639207134472] 1686936871.55s 100mb|------------------L2.1927------------------| "
- - "L2.1928[1686851639207134473,1686853499414268943] 1686936871.55s 100mb |-----------------L2.1928------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 300mb total:"
- - "L2 "
- - "L2.?[1686849779000000001,1686851019138093591] 1686936871.55s 100mb|------------L2.?------------| "
- - "L2.?[1686851019138093592,1686852259276187181] 1686936871.55s 100mb |------------L2.?------------| "
- - "L2.?[1686852259276187182,1686853499414268943] 1686936871.55s 100mb |-----------L2.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L2.1927, L2.1928, L1.1932, L1.1933"
- - " Creating 3 files"
- - "**** Simulation run 517, type=split(ReduceOverlap)(split_times=[1686852259276187181]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1814[1686851661119027533,1686852290761155116] 1686936871.55s|----------------------------------------L1.1814-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686851661119027533,1686852259276187181] 1686936871.55s 95mb|---------------------------------------L1.?----------------------------------------| "
- - "L1.?[1686852259276187182,1686852290761155116] 1686936871.55s 5mb |L1.?|"
- - "Committing partition 1:"
- - " Soft Deleting 1 files: L1.1814"
- - " Creating 2 files"
- - "**** Simulation run 518, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686851845896821338, 1686852672655549084]). 4 Input Files, 300mb total:"
- - "L1 "
- - "L1.1937[1686851661119027533,1686852259276187181] 1686936871.55s 95mb |------L1.1937------| "
- - "L1.1938[1686852259276187182,1686852290761155116] 1686936871.55s 5mb |L1.1938| "
- - "L2 "
- - "L2.1935[1686851019138093592,1686852259276187181] 1686936871.55s 100mb|------------------L2.1935------------------| "
- - "L2.1936[1686852259276187182,1686853499414268943] 1686936871.55s 100mb |-----------------L2.1936------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 300mb total:"
- - "L2 "
- - "L2.?[1686851019138093592,1686851845896821338] 1686936871.55s 100mb|------------L2.?------------| "
- - "L2.?[1686851845896821339,1686852672655549084] 1686936871.55s 100mb |------------L2.?------------| "
- - "L2.?[1686852672655549085,1686853499414268943] 1686936871.55s 100mb |-----------L2.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L2.1935, L2.1936, L1.1937, L1.1938"
- - " Creating 3 files"
- - "**** Simulation run 519, type=split(ReduceOverlap)(split_times=[1686852672655549084]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1815[1686852290761155117,1686852920403282699] 1686936871.55s|----------------------------------------L1.1815-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686852290761155117,1686852672655549084] 1686936871.55s 61mb|------------------------L1.?------------------------| "
- - "L1.?[1686852672655549085,1686852920403282699] 1686936871.55s 39mb |--------------L1.?---------------| "
- - "Committing partition 1:"
- - " Soft Deleting 1 files: L1.1815"
- - " Creating 2 files"
- - "**** Simulation run 520, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686852397069307378, 1686852948241793417]). 4 Input Files, 300mb total:"
- - "L1 "
- - "L1.1942[1686852290761155117,1686852672655549084] 1686936871.55s 61mb |-----L1.1942------| "
- - "L1.1943[1686852672655549085,1686852920403282699] 1686936871.55s 39mb |--L1.1943--| "
- - "L2 "
- - "L2.1940[1686851845896821339,1686852672655549084] 1686936871.55s 100mb|------------------L2.1940------------------| "
- - "L2.1941[1686852672655549085,1686853499414268943] 1686936871.55s 100mb |-----------------L2.1941------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 300mb total:"
- - "L2 "
- - "L2.?[1686851845896821339,1686852397069307378] 1686936871.55s 100mb|------------L2.?------------| "
- - "L2.?[1686852397069307379,1686852948241793417] 1686936871.55s 100mb |------------L2.?------------| "
- - "L2.?[1686852948241793418,1686853499414268943] 1686936871.55s 100mb |-----------L2.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L2.1940, L2.1941, L1.1942, L1.1943"
- - " Creating 3 files"
- - "**** Simulation run 521, type=split(ReduceOverlap)(split_times=[1686852948241793417]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1817[1686852920403282700,1686853405911193904] 1686936871.55s|----------------------------------------L1.1817-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686852920403282700,1686852948241793417] 1686936871.55s 6mb|L1.?| "
- - "L1.?[1686852948241793418,1686853405911193904] 1686936871.55s 94mb |---------------------------------------L1.?---------------------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 1 files: L1.1817"
- - " Creating 2 files"
- - "**** Simulation run 522, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686852764517630237, 1686853131965953095]). 4 Input Files, 300mb total:"
- - "L1 "
- - "L1.1947[1686852920403282700,1686852948241793417] 1686936871.55s 6mb |L1.1947| "
- - "L1.1948[1686852948241793418,1686853405911193904] 1686936871.55s 94mb |--------------L1.1948--------------| "
- - "L2 "
- - "L2.1945[1686852397069307379,1686852948241793417] 1686936871.55s 100mb|------------------L2.1945------------------| "
- - "L2.1946[1686852948241793418,1686853499414268943] 1686936871.55s 100mb |-----------------L2.1946------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 300mb total:"
- - "L2 "
- - "L2.?[1686852397069307379,1686852764517630237] 1686936871.55s 100mb|------------L2.?------------| "
- - "L2.?[1686852764517630238,1686853131965953095] 1686936871.55s 100mb |------------L2.?------------| "
- - "L2.?[1686853131965953096,1686853499414268943] 1686936871.55s 100mb |-----------L2.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L2.1945, L2.1946, L1.1947, L1.1948"
- - " Creating 3 files"
- - "**** Simulation run 523, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686853754631187460, 1686854377296421824]). 4 Input Files, 271mb total:"
- - "L1 "
- - "L1.1930[1686853405911193905,1686853499414268943] 1686936871.55s 19mb |L1.1930| "
- - "L1.1931[1686853499414268944,1686853891419105108] 1686936871.55s 81mb |-----L1.1931------| "
- - "L2 "
- - "L2.1951[1686853131965953096,1686853499414268943] 1686936871.55s 100mb|-----L2.1951-----| "
- - "L2.1929[1686853499414268944,1686854819000000000] 1686936871.55s 71mb |------------------------------L2.1929-------------------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 271mb total:"
- - "L2 "
- - "L2.?[1686853131965953096,1686853754631187460] 1686936871.55s 100mb|-------------L2.?--------------| "
- - "L2.?[1686853754631187461,1686854377296421824] 1686936871.55s 100mb |-------------L2.?--------------| "
- - "L2.?[1686854377296421825,1686854819000000000] 1686936871.55s 71mb |--------L2.?---------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L2.1929, L1.1930, L1.1931, L2.1951"
- - " Creating 3 files"
- - "**** Simulation run 524, type=split(ReduceOverlap)(split_times=[1686854377296421824, 1686854819000000000]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1820[1686854243778378366,1686854840368603300] 1686936871.55s|----------------------------------------L1.1820-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686854243778378366,1686854377296421824] 1686936871.55s 22mb|-------L1.?-------| "
- - "L1.?[1686854377296421825,1686854819000000000] 1686936871.55s 74mb |------------------------------L1.?------------------------------| "
- - "L1.?[1686854819000000001,1686854840368603300] 1686936871.55s 4mb |L1.?|"
- - "Committing partition 1:"
- - " Soft Deleting 1 files: L1.1820"
- - " Creating 3 files"
- - "**** Simulation run 525, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686854074019438293]). 3 Input Files, 195mb total:"
- - "L1 "
- - "L1.1819[1686853891419105109,1686854243778378365] 1686936871.55s 73mb |--------------------L1.1819---------------------| "
- - "L1.1955[1686854243778378366,1686854377296421824] 1686936871.55s 22mb |-----L1.1955-----| "
- - "L2 "
- - "L2.1953[1686853754631187461,1686854377296421824] 1686936871.55s 100mb|----------------------------------------L2.1953-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 195mb total:"
- - "L2 "
- - "L2.?[1686853754631187461,1686854074019438293] 1686936871.55s 100mb|--------------------L2.?--------------------| "
- - "L2.?[1686854074019438294,1686854377296421824] 1686936871.55s 95mb |------------------L2.?-------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L1.1819, L2.1953, L1.1955"
- - " Creating 2 files"
- - "**** Simulation run 526, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686854689009102312]). 3 Input Files, 149mb total:"
- - "L1 "
- - "L1.1956[1686854377296421825,1686854819000000000] 1686936871.55s 74mb|--------------------------------------L1.1956--------------------------------------| "
- - "L1.1957[1686854819000000001,1686854840368603300] 1686936871.55s 4mb |L1.1957|"
- - "L2 "
- - "L2.1954[1686854377296421825,1686854819000000000] 1686936871.55s 71mb|--------------------------------------L2.1954--------------------------------------| "
- - "**** 2 Output Files (parquet_file_id not yet assigned), 149mb total:"
- - "L2 "
- - "L2.?[1686854377296421825,1686854689009102312] 1686936871.55s 100mb|---------------------------L2.?---------------------------| "
- - "L2.?[1686854689009102313,1686854840368603300] 1686936871.55s 49mb |-----------L2.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L2.1954, L1.1956, L1.1957"
- - " Creating 2 files"
- - "**** Simulation run 527, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686856536982788349, 1686858233596973397]). 3 Input Files, 246mb total:"
- - "L1 "
- - "L1.1821[1686854840368603301,1686855436958828234] 1686936871.55s 100mb|-L1.1821--| "
- - "L1.1822[1686855436958828235,1686855729962162145] 1686936871.55s 49mb |L1.1822| "
- - "L2 "
- - "L2.46[1686854879000000000,1686859019000000000] 1686928811.43s 97mb|-----------------------------------------L2.46-----------------------------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 246mb total:"
- - "L2 "
- - "L2.?[1686854840368603301,1686856536982788349] 1686936871.55s 100mb|---------------L2.?---------------| "
- - "L2.?[1686856536982788350,1686858233596973397] 1686936871.55s 100mb |---------------L2.?---------------| "
- - "L2.?[1686858233596973398,1686859019000000000] 1686936871.55s 46mb |-----L2.?-----| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L2.46, L1.1821, L1.1822"
- - " Creating 3 files"
- - "**** Simulation run 528, type=split(ReduceOverlap)(split_times=[1686858233596973397]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1827[1686857661762616885,1686858236193776466] 1686936871.55s|----------------------------------------L1.1827-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686857661762616885,1686858233596973397] 1686936871.55s 100mb|-----------------------------------------L1.?------------------------------------------| "
- - "L1.?[1686858233596973398,1686858236193776466] 1686936871.55s 463kb |L1.?|"
- - "**** Simulation run 529, type=split(ReduceOverlap)(split_times=[1686856536982788349]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1824[1686856308052485467,1686856886142808786] 1686936871.55s|----------------------------------------L1.1824-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686856308052485467,1686856536982788349] 1686936871.55s 40mb|--------------L1.?---------------| "
- - "L1.?[1686856536982788350,1686856886142808786] 1686936871.55s 60mb |------------------------L1.?------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L1.1824, L1.1827"
- - " Creating 4 files"
- - "**** Simulation run 530, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686855548468013502, 1686856256567423703]). 3 Input Files, 240mb total:"
- - "L1 "
- - "L1.1823[1686855729962162146,1686856308052485466] 1686936871.55s 100mb |----------L1.1823-----------| "
- - "L1.1967[1686856308052485467,1686856536982788349] 1686936871.55s 40mb |-L1.1967--| "
- - "L2 "
- - "L2.1962[1686854840368603301,1686856536982788349] 1686936871.55s 100mb|----------------------------------------L2.1962-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 240mb total:"
- - "L2 "
- - "L2.?[1686854840368603301,1686855548468013502] 1686936871.55s 100mb|---------------L2.?----------------| "
- - "L2.?[1686855548468013503,1686856256567423703] 1686936871.55s 100mb |---------------L2.?----------------| "
- - "L2.?[1686856256567423704,1686856536982788349] 1686936871.55s 40mb |----L2.?----| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L1.1823, L2.1962, L1.1967"
- - " Creating 3 files"
- - "**** Simulation run 531, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686857111714340888, 1686857686445893426]). 4 Input Files, 295mb total:"
- - "L1 "
- - "L1.1968[1686856536982788350,1686856886142808786] 1686936871.55s 60mb|----L1.1968-----| "
- - "L1.1825[1686856886142808787,1686857087331457301] 1686936871.55s 35mb |L1.1825-| "
- - "L1.1826[1686857087331457302,1686857661762616884] 1686936871.55s 100mb |----------L1.1826-----------| "
- - "L2 "
- - "L2.1963[1686856536982788350,1686858233596973397] 1686936871.55s 100mb|----------------------------------------L2.1963-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 295mb total:"
- - "L2 "
- - "L2.?[1686856536982788350,1686857111714340888] 1686936871.55s 100mb|------------L2.?------------| "
- - "L2.?[1686857111714340889,1686857686445893426] 1686936871.55s 100mb |------------L2.?------------| "
- - "L2.?[1686857686445893427,1686858233596973397] 1686936871.55s 95mb |-----------L2.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L1.1825, L1.1826, L2.1963, L1.1968"
- - " Creating 3 files"
- - "**** Simulation run 532, type=split(ReduceOverlap)(split_times=[1686857686445893426]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1965[1686857661762616885,1686858233596973397] 1686936871.55s|----------------------------------------L1.1965-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686857661762616885,1686857686445893426] 1686936871.55s 4mb|L1.?| "
- - "L1.?[1686857686445893427,1686858233596973397] 1686936871.55s 95mb |----------------------------------------L1.?----------------------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 1 files: L1.1965"
- - " Creating 2 files"
- - "**** Simulation run 533, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686857492337275263, 1686857872960209637]). 4 Input Files, 295mb total:"
- - "L1 "
- - "L1.1975[1686857661762616885,1686857686445893426] 1686936871.55s 4mb |L1.1975| "
- - "L1.1976[1686857686445893427,1686858233596973397] 1686936871.55s 95mb |-----------------L1.1976-----------------| "
- - "L2 "
- - "L2.1973[1686857111714340889,1686857686445893426] 1686936871.55s 100mb|------------------L2.1973-------------------| "
- - "L2.1974[1686857686445893427,1686858233596973397] 1686936871.55s 95mb |-----------------L2.1974-----------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 295mb total:"
- - "L2 "
- - "L2.?[1686857111714340889,1686857492337275263] 1686936871.55s 100mb|------------L2.?------------| "
- - "L2.?[1686857492337275264,1686857872960209637] 1686936871.55s 100mb |------------L2.?------------| "
- - "L2.?[1686857872960209638,1686858233596973397] 1686936871.55s 95mb |-----------L2.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L2.1973, L2.1974, L1.1975, L1.1976"
- - " Creating 3 files"
- - "**** Simulation run 534, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686858684102523465, 1686859134608073532]). 7 Input Files, 281mb total:"
- - "L1 "
- - "L1.1966[1686858233596973398,1686858236193776466] 1686936871.55s 463kb|L1.1966| "
- - "L1.1828[1686858236193776467,1686858566228295774] 1686936871.55s 57mb|-------L1.1828-------| "
- - "L1.1874[1686858566228295775,1686859019000000000] 1686936871.55s 79mb |-----------L1.1874------------| "
- - "L1.1875[1686859019000000001,1686859138657133582] 1686936871.55s 21mb |L1.1875| "
- - "L1.1872[1686859138657133583,1686859499000000000] 1686936871.55s 63mb |--------L1.1872--------| "
- - "L2 "
- - "L2.1964[1686858233596973398,1686859019000000000] 1686936871.55s 46mb|-----------------------L2.1964-----------------------| "
- - "L2.54[1686859079000000000,1686859499000000000] 1686928811.43s 14mb |-----------L2.54-----------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 281mb total:"
- - "L2 "
- - "L2.?[1686858233596973398,1686858684102523465] 1686936871.55s 100mb|-------------L2.?-------------| "
- - "L2.?[1686858684102523466,1686859134608073532] 1686936871.55s 100mb |-------------L2.?-------------| "
- - "L2.?[1686859134608073533,1686859499000000000] 1686936871.55s 81mb |---------L2.?----------| "
- - "Committing partition 1:"
- - " Soft Deleting 7 files: L2.54, L1.1828, L1.1872, L1.1874, L1.1875, L2.1964, L1.1966"
- - " Creating 3 files"
- - "**** Simulation run 535, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686861405608233334, 1686863312216466667]). 3 Input Files, 220mb total:"
- - "L1 "
- - "L1.1873[1686859499000000001,1686859711085971389] 1686936871.55s 37mb|L1.1873| "
- - "L1.1831[1686859711085971390,1686860203735880900] 1686936871.55s 86mb |L1.1831-| "
- - "L2 "
- - "L2.56[1686859559000000000,1686863699000000000] 1686928811.43s 97mb |----------------------------------------L2.56-----------------------------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 220mb total:"
- - "L2 "
- - "L2.?[1686859499000000001,1686861405608233334] 1686936871.55s 100mb|-----------------L2.?-----------------| "
- - "L2.?[1686861405608233335,1686863312216466667] 1686936871.55s 100mb |-----------------L2.?-----------------| "
- - "L2.?[1686863312216466668,1686863699000000000] 1686936871.55s 20mb |-L2.?-| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L2.56, L1.1831, L1.1873"
- - " Creating 3 files"
- - "**** Simulation run 536, type=split(ReduceOverlap)(split_times=[1686863312216466667]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1842[1686863149270270235,1686863696277675850] 1686936871.55s|----------------------------------------L1.1842-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686863149270270235,1686863312216466667] 1686936871.55s 30mb|----------L1.?----------| "
- - "L1.?[1686863312216466668,1686863696277675850] 1686936871.55s 70mb |----------------------------L1.?-----------------------------| "
- - "**** Simulation run 537, type=split(ReduceOverlap)(split_times=[1686861405608233334]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1838[1686861358211972493,1686861864947613463] 1686936871.55s|----------------------------------------L1.1838-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686861358211972493,1686861405608233334] 1686936871.55s 9mb|-L1.?-| "
- - "L1.?[1686861405608233335,1686861864947613463] 1686936871.55s 91mb |-------------------------------------L1.?--------------------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L1.1838, L1.1842"
- - " Creating 4 files"
- - "**** Simulation run 538, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686860134536077779, 1686860770072155557]). 3 Input Files, 300mb total:"
- - "L1, all files 100mb "
- - "L1.1832[1686860203735880901,1686860851476331520] 1686936871.55s |----------L1.1832-----------| "
- - "L1.1837[1686860851476331521,1686861358211972492] 1686936871.55s |-------L1.1837-------| "
- - "L2, all files 100mb "
- - "L2.1983[1686859499000000001,1686861405608233334] 1686936871.55s|----------------------------------------L2.1983-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 300mb total:"
- - "L2 "
- - "L2.?[1686859499000000001,1686860134536077779] 1686936871.55s 100mb|------------L2.?------------| "
- - "L2.?[1686860134536077780,1686860770072155557] 1686936871.55s 100mb |-----------L2.?------------| "
- - "L2.?[1686860770072155558,1686861405608233334] 1686936871.55s 100mb |-----------L2.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L1.1832, L1.1837, L2.1983"
- - " Creating 3 files"
- - "**** Simulation run 539, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686861617453595289, 1686862464835035020]). 4 Input Files, 300mb total:"
- - "L1 "
- - "L1.1988[1686861358211972493,1686861405608233334] 1686936871.55s 9mb |L1.1988| "
- - "L1.1989[1686861405608233335,1686861864947613463] 1686936871.55s 91mb |---L1.1989----| "
- - "L2 "
- - "L2.1992[1686860770072155558,1686861405608233334] 1686936871.55s 100mb|------L2.1992-------| "
- - "L2.1984[1686861405608233335,1686863312216466667] 1686936871.55s 100mb |-----------------------------L2.1984-----------------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 300mb total:"
- - "L2 "
- - "L2.?[1686860770072155558,1686861617453595289] 1686936871.55s 100mb|------------L2.?------------| "
- - "L2.?[1686861617453595290,1686862464835035020] 1686936871.55s 100mb |------------L2.?------------| "
- - "L2.?[1686862464835035021,1686863312216466667] 1686936871.55s 100mb |-----------L2.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L2.1984, L1.1988, L1.1989, L2.1992"
- - " Creating 3 files"
- - "**** Simulation run 540, type=split(ReduceOverlap)(split_times=[1686862464835035020]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1840[1686862278459459426,1686862801147318435] 1686936871.55s|----------------------------------------L1.1840-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686862278459459426,1686862464835035020] 1686936871.55s 36mb|-------------L1.?-------------| "
- - "L1.?[1686862464835035021,1686862801147318435] 1686936871.55s 64mb |-------------------------L1.?--------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 1 files: L1.1840"
- - " Creating 2 files"
- - "**** Simulation run 541, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686862007484245554, 1686862397514895818]). 3 Input Files, 217mb total:"
- - "L1 "
- - "L1.1839[1686861864947613464,1686862278459459425] 1686936871.55s 82mb |-----------------L1.1839-----------------| "
- - "L1.1996[1686862278459459426,1686862464835035020] 1686936871.55s 36mb |-----L1.1996-----| "
- - "L2 "
- - "L2.1994[1686861617453595290,1686862464835035020] 1686936871.55s 100mb|----------------------------------------L2.1994-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 217mb total:"
- - "L2 "
- - "L2.?[1686861617453595290,1686862007484245554] 1686936871.55s 100mb|-----------------L2.?------------------| "
- - "L2.?[1686862007484245555,1686862397514895818] 1686936871.55s 100mb |-----------------L2.?------------------| "
- - "L2.?[1686862397514895819,1686862464835035020] 1686936871.55s 17mb |L2.?-| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L1.1839, L2.1994, L1.1996"
- - " Creating 3 files"
- - "**** Simulation run 542, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686862789833508891, 1686863114831982761]). 4 Input Files, 261mb total:"
- - "L1 "
- - "L1.1997[1686862464835035021,1686862801147318435] 1686936871.55s 64mb|-------------L1.1997-------------| "
- - "L1.1841[1686862801147318436,1686863149270270234] 1686936871.55s 67mb |-------------L1.1841--------------| "
- - "L1.1986[1686863149270270235,1686863312216466667] 1686936871.55s 30mb |----L1.1986----| "
- - "L2 "
- - "L2.1995[1686862464835035021,1686863312216466667] 1686936871.55s 100mb|----------------------------------------L2.1995-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 261mb total:"
- - "L2 "
- - "L2.?[1686862464835035021,1686862789833508891] 1686936871.55s 100mb|--------------L2.?--------------| "
- - "L2.?[1686862789833508892,1686863114831982761] 1686936871.55s 100mb |--------------L2.?--------------| "
- - "L2.?[1686863114831982762,1686863312216466667] 1686936871.55s 61mb |-------L2.?-------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L1.1841, L1.1986, L2.1995, L1.1997"
- - " Creating 3 files"
- - "**** Simulation run 543, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686864893143269255, 1686866474070071842]). 5 Input Files, 286mb total:"
- - "L1 "
- - "L1.1987[1686863312216466668,1686863696277675850] 1686936871.55s 70mb|L1.1987| "
- - "L1.1870[1686863696277675851,1686863699000000000] 1686936871.55s 510kb |L1.1870| "
- - "L1.1871[1686863699000000001,1686864243285081465] 1686936871.55s 100mb |L1.1871-| "
- - "L2 "
- - "L2.1985[1686863312216466668,1686863699000000000] 1686936871.55s 20mb|L2.1985| "
- - "L2.59[1686863759000000000,1686867839000000000] 1686928811.43s 96mb |-------------------------------------L2.59-------------------------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 286mb total:"
- - "L2 "
- - "L2.?[1686863312216466668,1686864893143269255] 1686936871.55s 100mb|------------L2.?-------------| "
- - "L2.?[1686864893143269256,1686866474070071842] 1686936871.55s 100mb |------------L2.?-------------| "
- - "L2.?[1686866474070071843,1686867839000000000] 1686936871.55s 86mb |----------L2.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 5 files: L2.59, L1.1870, L1.1871, L2.1985, L1.1987"
- - " Creating 3 files"
- - "**** Simulation run 544, type=split(ReduceOverlap)(split_times=[1686866474070071842]). 1 Input Files, 67mb total:"
- - "L1, all files 67mb "
- - "L1.1847[1686866132997818681,1686866632513513490] 1686936871.55s|----------------------------------------L1.1847-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 67mb total:"
- - "L1 "
- - "L1.?[1686866132997818681,1686866474070071842] 1686936871.55s 46mb|---------------------------L1.?----------------------------| "
- - "L1.?[1686866474070071843,1686866632513513490] 1686936871.55s 21mb |-----------L1.?-----------| "
- - "**** Simulation run 545, type=split(ReduceOverlap)(split_times=[1686864893143269255]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1845[1686864651607515460,1686865392302667070] 1686936871.55s|----------------------------------------L1.1845-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686864651607515460,1686864893143269255] 1686936871.55s 33mb|-----------L1.?------------| "
- - "L1.?[1686864893143269256,1686865392302667070] 1686936871.55s 67mb |---------------------------L1.?---------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L1.1845, L1.1847"
- - " Creating 4 files"
- - "**** Simulation run 546, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686864075006108740, 1686864837795750812]). 3 Input Files, 207mb total:"
- - "L1 "
- - "L1.1844[1686864243285081466,1686864651607515459] 1686936871.55s 75mb |-------L1.1844-------| "
- - "L1.2009[1686864651607515460,1686864893143269255] 1686936871.55s 33mb |--L1.2009--| "
- - "L2 "
- - "L2.2004[1686863312216466668,1686864893143269255] 1686936871.55s 100mb|----------------------------------------L2.2004-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 207mb total:"
- - "L2 "
- - "L2.?[1686863312216466668,1686864075006108740] 1686936871.55s 100mb|------------------L2.?-------------------| "
- - "L2.?[1686864075006108741,1686864837795750812] 1686936871.55s 100mb |------------------L2.?-------------------| "
- - "L2.?[1686864837795750813,1686864893143269255] 1686936871.55s 7mb |L2.?|"
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L1.1844, L2.2004, L1.2009"
- - " Creating 3 files"
- - "**** Simulation run 547, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686865484385600871, 1686866075627932486]). 3 Input Files, 267mb total:"
- - "L1 "
- - "L1.2010[1686864893143269256,1686865392302667070] 1686936871.55s 67mb|---------L1.2010----------| "
- - "L1.1846[1686865392302667071,1686866132997818680] 1686936871.55s 100mb |----------------L1.1846-----------------| "
- - "L2 "
- - "L2.2005[1686864893143269256,1686866474070071842] 1686936871.55s 100mb|----------------------------------------L2.2005-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 267mb total:"
- - "L2 "
- - "L2.?[1686864893143269256,1686865484385600871] 1686936871.55s 100mb|-------------L2.?--------------| "
- - "L2.?[1686865484385600872,1686866075627932486] 1686936871.55s 100mb |-------------L2.?--------------| "
- - "L2.?[1686866075627932487,1686866474070071842] 1686936871.55s 67mb |--------L2.?--------| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L1.1846, L2.2005, L1.2010"
- - " Creating 3 files"
- - "**** Simulation run 548, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686866872932211465, 1686867670236490443]). 4 Input Files, 221mb total:"
- - "L1 "
- - "L1.2007[1686866132997818681,1686866474070071842] 1686936871.55s 46mb |----L1.2007----| "
- - "L1.2008[1686866474070071843,1686866632513513490] 1686936871.55s 21mb |L1.2008| "
- - "L2 "
- - "L2.2016[1686866075627932487,1686866474070071842] 1686936871.55s 67mb|-----L2.2016------| "
- - "L2.2006[1686866474070071843,1686867839000000000] 1686936871.55s 86mb |------------------------------L2.2006------------------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 221mb total:"
- - "L2 "
- - "L2.?[1686866075627932487,1686866872932211465] 1686936871.55s 100mb|-----------------L2.?-----------------| "
- - "L2.?[1686866872932211466,1686867670236490443] 1686936871.55s 100mb |-----------------L2.?-----------------| "
- - "L2.?[1686867670236490444,1686867839000000000] 1686936871.55s 21mb |-L2.?-| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L2.2006, L1.2007, L1.2008, L2.2016"
- - " Creating 3 files"
- - "**** Simulation run 549, type=split(ReduceOverlap)(split_times=[1686867670236490443]). 1 Input Files, 82mb total:"
- - "L1, all files 82mb "
- - "L1.1868[1686867294213029770,1686867839000000000] 1686936871.55s|----------------------------------------L1.1868-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 82mb total:"
- - "L1 "
- - "L1.?[1686867294213029770,1686867670236490443] 1686936871.55s 57mb|----------------------------L1.?----------------------------| "
- - "L1.?[1686867670236490444,1686867839000000000] 1686936871.55s 26mb |----------L1.?-----------| "
- - "**** Simulation run 550, type=split(ReduceOverlap)(split_times=[1686866872932211465]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1848[1686866632513513491,1686867294213029769] 1686936871.55s|----------------------------------------L1.1848-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686866632513513491,1686866872932211465] 1686936871.55s 36mb|-------------L1.?-------------| "
- - "L1.?[1686866872932211466,1686867294213029769] 1686936871.55s 64mb |-------------------------L1.?--------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L1.1848, L1.1868"
- - " Creating 4 files"
- - "**** Simulation run 551, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686866607164120163, 1686867138700307839]). 4 Input Files, 300mb total:"
- - "L1 "
- - "L1.2022[1686866632513513491,1686866872932211465] 1686936871.55s 36mb |--L1.2022--| "
- - "L1.2023[1686866872932211466,1686867294213029769] 1686936871.55s 64mb |-------L1.2023-------| "
- - "L2 "
- - "L2.2017[1686866075627932487,1686866872932211465] 1686936871.55s 100mb|------------------L2.2017------------------| "
- - "L2.2018[1686866872932211466,1686867670236490443] 1686936871.55s 100mb |-----------------L2.2018------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 300mb total:"
- - "L2 "
- - "L2.?[1686866075627932487,1686866607164120163] 1686936871.55s 100mb|------------L2.?------------| "
- - "L2.?[1686866607164120164,1686867138700307839] 1686936871.55s 100mb |------------L2.?------------| "
- - "L2.?[1686867138700307840,1686867670236490443] 1686936871.55s 100mb |-----------L2.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L2.2017, L2.2018, L1.2022, L1.2023"
- - " Creating 3 files"
- - "**** Simulation run 552, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686867545868269737, 1686867953036231634]). 7 Input Files, 290mb total:"
- - "L1 "
- - "L1.2020[1686867294213029770,1686867670236490443] 1686936871.55s 57mb |---------L1.2020----------| "
- - "L1.2021[1686867670236490444,1686867839000000000] 1686936871.55s 26mb |-L1.2021--| "
- - "L1.1869[1686867839000000001,1686867955912546047] 1686936871.55s 18mb |L1.1869| "
- - "L1.1850[1686867955912546048,1686868319000000000] 1686936871.55s 55mb |---------L1.1850---------| "
- - "L2 "
- - "L2.2026[1686867138700307840,1686867670236490443] 1686936871.55s 100mb|---------------L2.2026----------------| "
- - "L2.2019[1686867670236490444,1686867839000000000] 1686936871.55s 21mb |-L2.2019--| "
- - "L2.74[1686867899000000000,1686868319000000000] 1686928811.43s 14mb |------------L2.74-------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 290mb total:"
- - "L2 "
- - "L2.?[1686867138700307840,1686867545868269737] 1686936871.55s 100mb|------------L2.?-------------| "
- - "L2.?[1686867545868269738,1686867953036231634] 1686936871.55s 100mb |------------L2.?-------------| "
- - "L2.?[1686867953036231635,1686868319000000000] 1686936871.55s 90mb |----------L2.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 7 files: L2.74, L1.1850, L1.1869, L2.2019, L1.2020, L1.2021, L2.2026"
- - " Creating 3 files"
- - "**** Simulation run 553, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686870526488100069, 1686872733976200137]). 3 Input Files, 239mb total:"
- - "L1 "
- - "L1.1851[1686868319000000001,1686869101428723112] 1686936871.55s 100mb|--L1.1851--| "
- - "L1.1856[1686869101428723113,1686869556056907425] 1686936871.55s 100mb |L1.1856| "
- - "L2 "
- - "L2.78[1686868379000000000,1686873599000000000] 1686928118.43s 39mb |----------------------------------------L2.78-----------------------------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 239mb total:"
- - "L2 "
- - "L2.?[1686868319000000001,1686870526488100069] 1686936871.55s 100mb|---------------L2.?----------------| "
- - "L2.?[1686870526488100070,1686872733976200137] 1686936871.55s 100mb |---------------L2.?----------------| "
- - "L2.?[1686872733976200138,1686873599000000000] 1686936871.55s 39mb |----L2.?----| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L2.78, L1.1851, L1.1856"
- - " Creating 3 files"
- - "**** Simulation run 554, type=split(ReduceOverlap)(split_times=[1686872733976200137]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1865[1686872264751876845,1686872735710689569] 1686936871.55s|----------------------------------------L1.1865-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686872264751876845,1686872733976200137] 1686936871.55s 100mb|-----------------------------------------L1.?------------------------------------------| "
- - "L1.?[1686872733976200138,1686872735710689569] 1686936871.55s 377kb |L1.?|"
- - "**** Simulation run 555, type=split(ReduceOverlap)(split_times=[1686870526488100069]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1859[1686870115756756731,1686870726636810267] 1686936871.55s|----------------------------------------L1.1859-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686870115756756731,1686870526488100069] 1686936871.55s 67mb|---------------------------L1.?---------------------------| "
- - "L1.?[1686870526488100070,1686870726636810267] 1686936871.55s 33mb |-----------L1.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L1.1859, L1.1865"
- - " Creating 4 files"
- - "**** Simulation run 556, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686869079291584458, 1686869839583168915]). 4 Input Files, 290mb total:"
- - "L1 "
- - "L1.1857[1686869556056907426,1686870010685091737] 1686936871.55s 100mb |----L1.1857-----| "
- - "L1.1858[1686870010685091738,1686870115756756730] 1686936871.55s 23mb |L1.1858| "
- - "L1.2035[1686870115756756731,1686870526488100069] 1686936871.55s 67mb |---L1.2035----| "
- - "L2 "
- - "L2.2030[1686868319000000001,1686870526488100069] 1686936871.55s 100mb|----------------------------------------L2.2030-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 290mb total:"
- - "L2 "
- - "L2.?[1686868319000000001,1686869079291584458] 1686936871.55s 100mb|------------L2.?------------| "
- - "L2.?[1686869079291584459,1686869839583168915] 1686936871.55s 100mb |------------L2.?------------| "
- - "L2.?[1686869839583168916,1686870526488100069] 1686936871.55s 90mb |-----------L2.?-----------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L1.1857, L1.1858, L2.2030, L1.2035"
- - " Creating 3 files"
- - "**** Simulation run 557, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686871474868504238, 1686872423248908406]). 3 Input Files, 233mb total:"
- - "L1 "
- - "L1.2036[1686870526488100070,1686870726636810267] 1686936871.55s 33mb|L1.2036| "
- - "L1.1860[1686870726636810268,1686871337516863803] 1686936871.55s 100mb |-------L1.1860--------| "
- - "L2 "
- - "L2.2031[1686870526488100070,1686872733976200137] 1686936871.55s 100mb|----------------------------------------L2.2031-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 233mb total:"
- - "L2 "
- - "L2.?[1686870526488100070,1686871474868504238] 1686936871.55s 100mb|----------------L2.?----------------| "
- - "L2.?[1686871474868504239,1686872423248908406] 1686936871.55s 100mb |----------------L2.?----------------| "
- - "L2.?[1686872423248908407,1686872733976200137] 1686936871.55s 33mb |---L2.?---| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L1.1860, L2.2031, L1.2036"
- - " Creating 3 files"
- - "**** Simulation run 558, type=split(ReduceOverlap)(split_times=[1686872423248908406]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.2033[1686872264751876845,1686872733976200137] 1686936871.55s|----------------------------------------L1.2033-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686872264751876845,1686872423248908406] 1686936871.55s 34mb|------------L1.?------------| "
- - "L1.?[1686872423248908407,1686872733976200137] 1686936871.55s 66mb |--------------------------L1.?---------------------------| "
- - "**** Simulation run 559, type=split(ReduceOverlap)(split_times=[1686871474868504238]). 1 Input Files, 100mb total:"
- - "L1, all files 100mb "
- - "L1.1862[1686871337516863804,1686871801134370324] 1686936871.55s|----------------------------------------L1.1862-----------------------------------------|"
- - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:"
- - "L1 "
- - "L1.?[1686871337516863804,1686871474868504238] 1686936871.55s 30mb|----------L1.?----------| "
- - "L1.?[1686871474868504239,1686871801134370324] 1686936871.55s 70mb |----------------------------L1.?-----------------------------| "
- - "Committing partition 1:"
- - " Soft Deleting 2 files: L1.1862, L1.2033"
- - " Creating 4 files"
- - "**** Simulation run 560, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686871158741704859, 1686871790995309648]). 4 Input Files, 300mb total:"
- - "L1 "
- - "L1.2045[1686871337516863804,1686871474868504238] 1686936871.55s 30mb |L1.2045| "
- - "L1.2046[1686871474868504239,1686871801134370324] 1686936871.55s 70mb |---L1.2046---| "
- - "L2 "
- - "L2.2040[1686870526488100070,1686871474868504238] 1686936871.55s 100mb|------------------L2.2040------------------| "
- - "L2.2041[1686871474868504239,1686872423248908406] 1686936871.55s 100mb |-----------------L2.2041------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 300mb total:"
- - "L2 "
- - "L2.?[1686870526488100070,1686871158741704859] 1686936871.55s 100mb|------------L2.?------------| "
- - "L2.?[1686871158741704860,1686871790995309648] 1686936871.55s 100mb |------------L2.?------------| "
- - "L2.?[1686871790995309649,1686872423248908406] 1686936871.55s 100mb |-----------L2.?------------| "
- - "Committing partition 1:"
- - " Soft Deleting 4 files: L2.2040, L2.2041, L1.2045, L1.2046"
- - " Creating 3 files"
- - "**** Simulation run 561, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686872061589130317, 1686872332182950985]). 3 Input Files, 234mb total:"
- - "L1 "
- - "L1.1863[1686871801134370325,1686872264751876844] 1686936871.55s 100mb |----------------------------L1.1863----------------------------| "
- - "L1.2043[1686872264751876845,1686872423248908406] 1686936871.55s 34mb |------L1.2043-------| "
- - "L2 "
- - "L2.2049[1686871790995309649,1686872423248908406] 1686936871.55s 100mb|----------------------------------------L2.2049-----------------------------------------|"
- - "**** 3 Output Files (parquet_file_id not yet assigned), 234mb total:"
- - "L2 "
- - "L2.?[1686871790995309649,1686872061589130317] 1686936871.55s 100mb|----------------L2.?----------------| "
- - "L2.?[1686872061589130318,1686872332182950985] 1686936871.55s 100mb |----------------L2.?----------------| "
- - "L2.?[1686872332182950986,1686872423248908406] 1686936871.55s 34mb |---L2.?---| "
- - "Committing partition 1:"
- - " Soft Deleting 3 files: L1.1863, L1.2043, L2.2049"
- - " Creating 3 files"
- - "**** Simulation run 562, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686872916648819217, 1686873410048730027]). 5 Input Files, 238mb total:"
- - "L1 "
- - "L1.2044[1686872423248908407,1686872733976200137] 1686936871.55s 66mb|-------L1.2044-------| "
- - "L1.2034[1686872733976200138,1686872735710689569] 1686936871.55s 377kb |L1.2034| "
- - "L1.1866[1686872735710689570,1686873206669502293] 1686936871.55s 100mb |-------------L1.1866--------------| "
- - "L2 "
- - "L2.2042[1686872423248908407,1686872733976200137] 1686936871.55s 33mb|-------L2.2042-------| "
- - "L2.2032[1686872733976200138,1686873599000000000] 1686936871.55s 39mb |----------------------------L2.2032-----------------------------| "
- - "**** 3 Output Files (parquet_file_id not yet assigned), 238mb total:"
- - "L2 "
- - "L2.?[1686872423248908407,1686872916648819217] 1686936871.55s 100mb|---------------L2.?----------------| "
- - "L2.?[1686872916648819218,1686873410048730027] 1686936871.55s 100mb |---------------L2.?----------------| "
- - "L2.?[1686873410048730028,1686873599000000000] 1686936871.55s 38mb |----L2.?----| "
- - "Committing partition 1:"
- - " Soft Deleting 5 files: L1.1866, L2.2032, L1.2034, L2.2042, L1.2044"
- - " Creating 3 files"
- "**** Final Output Files (49.67gb written)"
- "L1 "
- "L1.1867[1686873206669502294,1686873599000000000] 1686936871.55s 83mb |L1.1867|"
@@ -8057,3 +1187,244 @@ async fn stuck() {
"###
);
}
+
+#[tokio::test]
+async fn stuck_l1() {
+ test_helpers::maybe_start_logging();
+
+ let setup = layout_setup_builder()
+ .await
+ .with_max_num_files_per_plan(20)
+ .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE)
+ .with_partition_timeout(Duration::from_millis(100))
+ .build()
+ .await;
+
+ setup
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_min_time(1686873630000000000)
+ .with_max_time(1686879712000000000)
+ .with_compaction_level(CompactionLevel::Final)
+ .with_max_l0_created_at(Time::from_timestamp_nanos(1686927078592450239))
+ .with_file_size_bytes(104071379),
+ )
+ .await;
+
+ setup
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_min_time(1686873630000000000)
+ .with_max_time(1686920683000000000)
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .with_max_l0_created_at(Time::from_timestamp_nanos(1686928116935534089))
+ .with_file_size_bytes(74761432),
+ )
+ .await;
+
+ setup
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_min_time(1686879750000000000)
+ .with_max_time(1686885832000000000)
+ .with_compaction_level(CompactionLevel::Final)
+ .with_max_l0_created_at(Time::from_timestamp_nanos(1686927078592450239))
+ .with_file_size_bytes(104046636),
+ )
+ .await;
+
+ setup
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_min_time(1686885870000000000)
+ .with_max_time(1686888172000000000)
+ .with_compaction_level(CompactionLevel::Final)
+ .with_max_l0_created_at(Time::from_timestamp_nanos(1686927078592450239))
+ .with_file_size_bytes(39504848),
+ )
+ .await;
+
+ setup
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_min_time(1686888210000000000)
+ .with_max_time(1686894292000000000)
+ .with_compaction_level(CompactionLevel::Final)
+ .with_max_l0_created_at(Time::from_timestamp_nanos(1686927078592450239))
+ .with_file_size_bytes(104068640),
+ )
+ .await;
+
+ setup
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_min_time(1686894330000000000)
+ .with_max_time(1686900412000000000)
+ .with_compaction_level(CompactionLevel::Final)
+ .with_max_l0_created_at(Time::from_timestamp_nanos(1686927078592450239))
+ .with_file_size_bytes(104024462),
+ )
+ .await;
+
+ setup
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_min_time(1686900450000000000)
+ .with_max_time(1686901072000000000)
+ .with_compaction_level(CompactionLevel::Final)
+ .with_max_l0_created_at(Time::from_timestamp_nanos(1686927078592450239))
+ .with_file_size_bytes(12847477),
+ )
+ .await;
+
+ setup
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_min_time(1686901110000000000)
+ .with_max_time(1686907132000000000)
+ .with_compaction_level(CompactionLevel::Final)
+ .with_max_l0_created_at(Time::from_timestamp_nanos(1686927078592450239))
+ .with_file_size_bytes(103082698),
+ )
+ .await;
+
+ setup
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_min_time(1686907170000000000)
+ .with_max_time(1686910072000000000)
+ .with_compaction_level(CompactionLevel::Final)
+ .with_max_l0_created_at(Time::from_timestamp_nanos(1686927078592450239))
+ .with_file_size_bytes(51292692),
+ )
+ .await;
+
+ setup
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_min_time(1686910110000000000)
+ .with_max_time(1686919792000000000)
+ .with_compaction_level(CompactionLevel::Final)
+ .with_max_l0_created_at(Time::from_timestamp_nanos(1686926864318936602))
+ .with_file_size_bytes(105671599),
+ )
+ .await;
+
+ setup
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_min_time(1686919830000000000)
+ .with_max_time(1686926803000000000)
+ .with_compaction_level(CompactionLevel::Final)
+ .with_max_l0_created_at(Time::from_timestamp_nanos(1686926864318936602))
+ .with_file_size_bytes(71282156),
+ )
+ .await;
+
+ setup
+ .partition
+ .create_parquet_file(
+ parquet_builder()
+ .with_min_time(1686920730000000000)
+ .with_max_time(1686926803000000000)
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .with_max_l0_created_at(Time::from_timestamp_nanos(1686928116935534089))
+ .with_file_size_bytes(38566243),
+ )
+ .await;
+
+ insta::assert_yaml_snapshot!(
+ run_layout_scenario(&setup).await,
+ @r###"
+ ---
+ - "**** Input Files "
+ - "L1 "
+ - "L1.2[1686873630000000000,1686920683000000000] 1686928116.94s 71mb|------------------------------------L1.2-------------------------------------| "
+ - "L1.12[1686920730000000000,1686926803000000000] 1686928116.94s 37mb |-L1.12--| "
+ - "L2 "
+ - "L2.1[1686873630000000000,1686879712000000000] 1686927078.59s 99mb|--L2.1--| "
+ - "L2.3[1686879750000000000,1686885832000000000] 1686927078.59s 99mb |--L2.3--| "
+ - "L2.4[1686885870000000000,1686888172000000000] 1686927078.59s 38mb |L2.4| "
+ - "L2.5[1686888210000000000,1686894292000000000] 1686927078.59s 99mb |--L2.5--| "
+ - "L2.6[1686894330000000000,1686900412000000000] 1686927078.59s 99mb |--L2.6--| "
+ - "L2.7[1686900450000000000,1686901072000000000] 1686927078.59s 12mb |L2.7| "
+ - "L2.8[1686901110000000000,1686907132000000000] 1686927078.59s 98mb |--L2.8--| "
+ - "L2.9[1686907170000000000,1686910072000000000] 1686927078.59s 49mb |L2.9| "
+ - "L2.10[1686910110000000000,1686919792000000000] 1686926864.32s 101mb |----L2.10-----| "
+ - "L2.11[1686919830000000000,1686926803000000000] 1686926864.32s 68mb |--L2.11--| "
+ - "**** Simulation run 0, type=split(ReduceOverlap)(split_times=[1686879712000000000, 1686885832000000000, 1686888172000000000, 1686894292000000000, 1686900412000000000, 1686901072000000000, 1686907132000000000, 1686910072000000000, 1686919792000000000]). 1 Input Files, 71mb total:"
+ - "L1, all files 71mb "
+ - "L1.2[1686873630000000000,1686920683000000000] 1686928116.94s|------------------------------------------L1.2------------------------------------------|"
+ - "**** 10 Output Files (parquet_file_id not yet assigned), 71mb total:"
+ - "L1 "
+ - "L1.?[1686873630000000000,1686879712000000000] 1686928116.94s 9mb|--L1.?---| "
+ - "L1.?[1686879712000000001,1686885832000000000] 1686928116.94s 9mb |--L1.?---| "
+ - "L1.?[1686885832000000001,1686888172000000000] 1686928116.94s 4mb |L1.?| "
+ - "L1.?[1686888172000000001,1686894292000000000] 1686928116.94s 9mb |--L1.?---| "
+ - "L1.?[1686894292000000001,1686900412000000000] 1686928116.94s 9mb |--L1.?---| "
+ - "L1.?[1686900412000000001,1686901072000000000] 1686928116.94s 1mb |L1.?| "
+ - "L1.?[1686901072000000001,1686907132000000000] 1686928116.94s 9mb |--L1.?---| "
+ - "L1.?[1686907132000000001,1686910072000000000] 1686928116.94s 4mb |L1.?| "
+ - "L1.?[1686910072000000001,1686919792000000000] 1686928116.94s 15mb |------L1.?------| "
+ - "L1.?[1686919792000000001,1686920683000000000] 1686928116.94s 1mb |L1.?|"
+ - "Committing partition 1:"
+ - " Soft Deleting 1 files: L1.2"
+ - " Creating 10 files"
+ - "**** Simulation run 1, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1686879262359644750, 1686884894719289500]). 6 Input Files, 258mb total:"
+ - "L1 "
+ - "L1.13[1686873630000000000,1686879712000000000] 1686928116.94s 9mb|---------------L1.13---------------| "
+ - "L1.14[1686879712000000001,1686885832000000000] 1686928116.94s 9mb |---------------L1.14---------------| "
+ - "L1.15[1686885832000000001,1686888172000000000] 1686928116.94s 4mb |---L1.15----| "
+ - "L2 "
+ - "L2.1[1686873630000000000,1686879712000000000] 1686927078.59s 99mb|---------------L2.1----------------| "
+ - "L2.3[1686879750000000000,1686885832000000000] 1686927078.59s 99mb |---------------L2.3----------------| "
+ - "L2.4[1686885870000000000,1686888172000000000] 1686927078.59s 38mb |----L2.4----| "
+ - "**** 3 Output Files (parquet_file_id not yet assigned), 258mb total:"
+ - "L2 "
+ - "L2.?[1686873630000000000,1686879262359644750] 1686928116.94s 100mb|--------------L2.?--------------| "
+ - "L2.?[1686879262359644751,1686884894719289500] 1686928116.94s 100mb |--------------L2.?--------------| "
+ - "L2.?[1686884894719289501,1686888172000000000] 1686928116.94s 58mb |-------L2.?-------| "
+ - "Committing partition 1:"
+ - " Soft Deleting 6 files: L2.1, L2.3, L2.4, L1.13, L1.14, L1.15"
+ - " Creating 3 files"
+ - "**** Final Output Files (329mb written)"
+ - "L1 "
+ - "L1.12[1686920730000000000,1686926803000000000] 1686928116.94s 37mb |-L1.12--| "
+ - "L1.16[1686888172000000001,1686894292000000000] 1686928116.94s 9mb |-L1.16--| "
+ - "L1.17[1686894292000000001,1686900412000000000] 1686928116.94s 9mb |-L1.17--| "
+ - "L1.18[1686900412000000001,1686901072000000000] 1686928116.94s 1mb |L1.18| "
+ - "L1.19[1686901072000000001,1686907132000000000] 1686928116.94s 9mb |-L1.19--| "
+ - "L1.20[1686907132000000001,1686910072000000000] 1686928116.94s 4mb |L1.20| "
+ - "L1.21[1686910072000000001,1686919792000000000] 1686928116.94s 15mb |----L1.21-----| "
+ - "L1.22[1686919792000000001,1686920683000000000] 1686928116.94s 1mb |L1.22| "
+ - "L2 "
+ - "L2.5[1686888210000000000,1686894292000000000] 1686927078.59s 99mb |--L2.5--| "
+ - "L2.6[1686894330000000000,1686900412000000000] 1686927078.59s 99mb |--L2.6--| "
+ - "L2.7[1686900450000000000,1686901072000000000] 1686927078.59s 12mb |L2.7| "
+ - "L2.8[1686901110000000000,1686907132000000000] 1686927078.59s 98mb |--L2.8--| "
+ - "L2.9[1686907170000000000,1686910072000000000] 1686927078.59s 49mb |L2.9| "
+ - "L2.10[1686910110000000000,1686919792000000000] 1686926864.32s 101mb |----L2.10-----| "
+ - "L2.11[1686919830000000000,1686926803000000000] 1686926864.32s 68mb |--L2.11--| "
+ - "L2.23[1686873630000000000,1686879262359644750] 1686928116.94s 100mb|-L2.23-| "
+ - "L2.24[1686879262359644751,1686884894719289500] 1686928116.94s 100mb |-L2.24-| "
+ - "L2.25[1686884894719289501,1686888172000000000] 1686928116.94s 58mb |L2.25| "
+ "###
+ );
+ // TODO(maybe): see matching comment in files_to_compact.rs/limit_files_to_compact
+ // The L1s left above are less than ideal, but maybe not bad. This scenario initially just barely met the criteria for compaction and started with splits
+ // to remove overlaps between L1 and L2 (that's good). Due to the grouping of files they didn't all get to compact in the first round (that's ok).
+ // But the first few that got compacted in the first round were enough to make the partition no longer meet the criteria for compaction, so the rest
+ // are left sitting there ready to compact with their L2s, but not quite getting to.
+ // The critical point is that this case doesn't loop forever anymore.
+}
|
6f1c6fa44a51f6398baa1719bc1992744c9557d2
|
Marco Neumann
|
2023-09-07 11:13:17
|
i->q V2 tracing integration (#8680)
|
* refactor: improve `TestLayer`
* feat: tracing layer
| null |
feat: i->q V2 tracing integration (#8680)
* refactor: improve `TestLayer`
* feat: tracing layer
|
diff --git a/ingester_query_client/Cargo.toml b/ingester_query_client/Cargo.toml
index 6860fd0b8a..68f6d857a3 100644
--- a/ingester_query_client/Cargo.toml
+++ b/ingester_query_client/Cargo.toml
@@ -16,6 +16,7 @@ http = "0.2.9"
ingester_query_grpc = { path = "../ingester_query_grpc" }
observability_deps = { path = "../observability_deps" }
snafu = "0.7"
+tokio = { version = "1.32" }
tonic = { workspace = true }
trace = { path = "../trace" }
trace_http = { path = "../trace_http" }
diff --git a/ingester_query_client/src/layers/deserialize.rs b/ingester_query_client/src/layers/deserialize.rs
index e422e62655..e652b02167 100644
--- a/ingester_query_client/src/layers/deserialize.rs
+++ b/ingester_query_client/src/layers/deserialize.rs
@@ -729,7 +729,13 @@ mod tests {
payload: [proto::QueryResponse; N],
) -> Result<QueryResponse<ResponseMetadata, ResponsePayload>, DynError> {
let l = TestLayer::<(), (), proto::QueryResponse>::default();
- l.mock_response(TestResponse::ok_payload((), payload));
+
+ let mut resp = TestResponse::ok(());
+ for p in payload {
+ resp = resp.with_ok_payload(p);
+ }
+ l.mock_response(resp);
+
let l = DeserializeLayer::new(l);
l.query(()).await
}
diff --git a/ingester_query_client/src/layers/logging.rs b/ingester_query_client/src/layers/logging.rs
index e4159f11a1..83a0b847a8 100644
--- a/ingester_query_client/src/layers/logging.rs
+++ b/ingester_query_client/src/layers/logging.rs
@@ -113,11 +113,12 @@ mod tests {
async fn test() {
let l = TestLayer::<(), (), ()>::default();
l.mock_response(TestResponse::err(DynError::from("error 1")));
- l.mock_response(TestResponse::ok_payload_res(
- (),
- [Ok(()), Err(DynError::from("error 2"))],
- ));
- l.mock_response(TestResponse::ok_payload((), [(), ()]));
+ l.mock_response(
+ TestResponse::ok(())
+ .with_ok_payload(())
+ .with_err_payload(DynError::from("error 2")),
+ );
+ l.mock_response(TestResponse::ok(()).with_ok_payload(()).with_ok_payload(()));
let l = LoggingLayer::new(l, "foo.bar".into());
let capture = TracingCapture::new();
diff --git a/ingester_query_client/src/layers/mod.rs b/ingester_query_client/src/layers/mod.rs
index 95adb0c52a..e489d7907d 100644
--- a/ingester_query_client/src/layers/mod.rs
+++ b/ingester_query_client/src/layers/mod.rs
@@ -5,3 +5,4 @@ pub mod logging;
pub mod network;
pub mod serialize;
pub mod testing;
+pub mod tracing;
diff --git a/ingester_query_client/src/layers/testing.rs b/ingester_query_client/src/layers/testing.rs
index 23ce314da5..5b9785d9fe 100644
--- a/ingester_query_client/src/layers/testing.rs
+++ b/ingester_query_client/src/layers/testing.rs
@@ -1,8 +1,12 @@
//! Testing layer.
-use std::{fmt::Debug, sync::Mutex};
+use std::{
+ fmt::Debug,
+ sync::{Arc, Mutex},
+};
use async_trait::async_trait;
use futures::StreamExt;
+use tokio::sync::Barrier;
use crate::{
error::DynError,
@@ -16,6 +20,7 @@ where
ResponseMetadata: Clone + Debug + Send + Sync + 'static,
ResponsePayload: Clone + Debug + Send + Sync + 'static,
{
+ barriers: Vec<Arc<Barrier>>,
res: Result<(ResponseMetadata, Vec<Result<ResponsePayload, DynError>>), DynError>,
}
@@ -27,30 +32,43 @@ where
/// Create OK response w/o any payload.
pub fn ok(md: ResponseMetadata) -> Self {
Self {
+ barriers: vec![],
res: Ok((md, vec![])),
}
}
/// Create ERR response w/o any payload.
pub fn err(e: DynError) -> Self {
- Self { res: Err(e) }
- }
-
- /// Create OK response w/ OK payload.
- pub fn ok_payload<const N: usize>(md: ResponseMetadata, payload: [ResponsePayload; N]) -> Self {
Self {
- res: Ok((md, payload.into_iter().map(Ok).collect())),
+ barriers: vec![],
+ res: Err(e),
}
}
- /// Create OK response w/ [`Result`]-typed payload.
- pub fn ok_payload_res<const N: usize>(
- md: ResponseMetadata,
- payload: [Result<ResponsePayload, DynError>; N],
- ) -> Self {
- Self {
- res: Ok((md, payload.into_iter().collect())),
- }
+ /// Add OK payload response.
+ ///
+ /// # Panic
+ /// This is only legal for [`ok`](Self::ok) responses.
+ pub fn with_ok_payload(mut self, payload: ResponsePayload) -> Self {
+ self.res.as_mut().expect("ok status").1.push(Ok(payload));
+ self
+ }
+
+ /// Add Err payload response.
+ ///
+ /// # Panic
+ /// This is only legal for [`ok`](Self::ok) responses.
+ pub fn with_err_payload(mut self, e: DynError) -> Self {
+ self.res.as_mut().expect("ok status").1.push(Err(e));
+ self
+ }
+
+ /// Add barrier to initial response (i.e. BEFORE the metadata returns).
+ ///
+ /// Multiple barriers can be stacked, e.g. to check if the response is pending and then block it.
+ pub fn with_initial_barrier(mut self, barrier: Arc<Barrier>) -> Self {
+ self.barriers.push(barrier);
+ self
}
}
@@ -142,9 +160,13 @@ where
};
// assert AFTER dropping the lock guard
- let response = maybe_response.expect("no response left");
+ let TestResponse { barriers, res } = maybe_response.expect("no response left");
+
+ for barrier in barriers {
+ barrier.wait().await;
+ }
- response.res.map(|(metadata, payload)| QueryResponse {
+ res.map(|(metadata, payload)| QueryResponse {
metadata,
payload: futures::stream::iter(payload).boxed(),
})
diff --git a/ingester_query_client/src/layers/tracing.rs b/ingester_query_client/src/layers/tracing.rs
new file mode 100644
index 0000000000..240795c804
--- /dev/null
+++ b/ingester_query_client/src/layers/tracing.rs
@@ -0,0 +1,249 @@
+//! Tracing layer.
+
+use async_trait::async_trait;
+use futures::StreamExt;
+use std::{fmt::Debug, sync::Arc, task::Poll};
+use trace::span::{Span, SpanRecorder};
+
+use crate::{
+ error::DynError,
+ layer::{Layer, QueryResponse},
+};
+
+/// Tracing layer.
+#[derive(Debug)]
+pub struct TracingLayer<L, R>
+where
+ L: Layer<Request = (R, Option<Span>)>,
+ R: Clone + Debug + Send + Sync + 'static,
+{
+ addr: Arc<str>,
+ inner: L,
+}
+
+impl<L, R> TracingLayer<L, R>
+where
+ L: Layer<Request = (R, Option<Span>)>,
+ R: Clone + Debug + Send + Sync + 'static,
+{
+ /// Create new tracing wrapper.
+ pub fn new(inner: L, addr: Arc<str>) -> Self {
+ Self { addr, inner }
+ }
+}
+
+#[async_trait]
+impl<L, R> Layer for TracingLayer<L, R>
+where
+ L: Layer<Request = (R, Option<Span>)>,
+ R: Clone + Debug + Send + Sync + 'static,
+{
+ type Request = (R, Option<Span>);
+ type ResponseMetadata = L::ResponseMetadata;
+ type ResponsePayload = L::ResponsePayload;
+
+ async fn query(
+ &self,
+ request: Self::Request,
+ ) -> Result<QueryResponse<Self::ResponseMetadata, Self::ResponsePayload>, DynError> {
+ let (r, span) = request;
+
+ let mut tracker = CancelationTracker {
+ span_recorder: SpanRecorder::new(span),
+ done: false,
+ };
+
+ tracker
+ .span_recorder
+ .set_metadata("addr", self.addr.as_ref().to_owned());
+
+ match self
+ .inner
+ .query((r, tracker.span_recorder.child_span("ingester request")))
+ .await
+ {
+ Ok(QueryResponse {
+ metadata,
+ mut payload,
+ }) => {
+ tracker
+ .span_recorder
+ .event("ingester response stream starts");
+
+ Ok(QueryResponse {
+ metadata,
+ payload: futures::stream::poll_fn(move |cx| {
+ let res = payload.poll_next_unpin(cx);
+
+ match &res {
+ Poll::Ready(Some(Ok(_))) => {
+ tracker
+ .span_recorder
+ .event("ingester response stream response");
+ }
+ Poll::Ready(Some(Err(e))) => {
+ tracker.span_recorder.error(e.to_string());
+ tracker.done = true;
+ }
+ Poll::Ready(None) => {
+ tracker.span_recorder.ok("ingester response stream end");
+ tracker.done = true;
+ }
+ Poll::Pending => {}
+ }
+
+ res
+ })
+ .boxed(),
+ })
+ }
+ Err(e) => {
+ tracker.span_recorder.error(e.to_string());
+ tracker.done = true;
+ Err(e)
+ }
+ }
+ }
+}
+
+struct CancelationTracker {
+ span_recorder: SpanRecorder,
+ done: bool,
+}
+
+impl Drop for CancelationTracker {
+ fn drop(&mut self) {
+ if !self.done {
+ self.span_recorder.event("cancelled");
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use futures::TryStreamExt;
+ use std::fmt::Write;
+ use tokio::{sync::Barrier, task::JoinSet};
+ use trace::RingBufferTraceCollector;
+
+ use crate::{
+ layers::testing::{TestLayer, TestResponse},
+ testing::span,
+ };
+
+ use super::*;
+
+ #[tokio::test]
+ async fn test() {
+ let barrier_1 = Arc::new(Barrier::new(2));
+ let barrier_2 = Arc::new(Barrier::new(2));
+
+ let l = TestLayer::<((), Option<Span>), (), ()>::default();
+ l.mock_response(TestResponse::err(DynError::from("error 1")));
+ l.mock_response(
+ TestResponse::ok(())
+ .with_ok_payload(())
+ .with_err_payload(DynError::from("error 2")),
+ );
+ l.mock_response(TestResponse::ok(()).with_ok_payload(()).with_ok_payload(()));
+ l.mock_response(
+ TestResponse::ok(())
+ .with_initial_barrier(Arc::clone(&barrier_1))
+ .with_initial_barrier(barrier_2),
+ );
+ let l = TracingLayer::new(l, "foo.bar".into());
+
+ let collector = Arc::new(RingBufferTraceCollector::new(100));
+
+ let mut span = span();
+ span.ctx.collector = Some(Arc::clone(&collector) as _);
+
+ l.query(((), Some(span.clone()))).await.unwrap_err();
+ l.query(((), Some(span.clone())))
+ .await
+ .unwrap()
+ .payload
+ .try_collect::<Vec<_>>()
+ .await
+ .unwrap_err();
+ l.query(((), Some(span.clone())))
+ .await
+ .unwrap()
+ .payload
+ .try_collect::<Vec<_>>()
+ .await
+ .unwrap();
+
+ let mut join_set = JoinSet::new();
+ join_set.spawn(async move {
+ l.query(((), Some(span))).await.unwrap();
+ unreachable!("request should have been cancelled");
+ });
+
+ barrier_1.wait().await;
+ join_set.shutdown().await;
+
+ assert_eq!(
+ format_spans(&collector.spans()).trim(),
+ [
+ "span:",
+ " name: span",
+ " status: Err",
+ " metadata:",
+ " addr: foo.bar",
+ " events:",
+ " error 1",
+ "span:",
+ " name: span",
+ " status: Err",
+ " metadata:",
+ " addr: foo.bar",
+ " events:",
+ " ingester response stream starts",
+ " ingester response stream response",
+ " error 2",
+ "span:",
+ " name: span",
+ " status: Ok",
+ " metadata:",
+ " addr: foo.bar",
+ " events:",
+ " ingester response stream starts",
+ " ingester response stream response",
+ " ingester response stream response",
+ " ingester response stream end",
+ "span:",
+ " name: span",
+ " status: Unknown",
+ " metadata:",
+ " addr: foo.bar",
+ " events:",
+ " cancelled",
+ ]
+ .join("\n")
+ );
+ }
+
+ fn format_spans(spans: &[Span]) -> String {
+ let mut out = String::new();
+
+ for span in spans {
+ writeln!(&mut out, "span:",).unwrap();
+
+ writeln!(&mut out, " name: {}", span.name).unwrap();
+ writeln!(&mut out, " status: {:?}", span.status).unwrap();
+
+ writeln!(&mut out, " metadata:").unwrap();
+ for (k, v) in &span.metadata {
+ writeln!(&mut out, " {}: {}", k, v.string().unwrap_or_default(),).unwrap();
+ }
+
+ writeln!(&mut out, " events:").unwrap();
+ for evt in &span.events {
+ writeln!(&mut out, " {}", evt.msg,).unwrap();
+ }
+ }
+
+ out
+ }
+}
|
3969b4092545ab5a9b7069a46611243d6629a4cf
|
Marco Neumann
|
2023-08-02 16:42:33
|
avoid recursive locking during LRU shutdown (#8395)
|
* test: regression test for #8378
* fix: avoid recursive locking during LRU shutdown
Fixes the following construct during shutdown:
1. `clean_up_loop` holds `members` lock
2. calls `member.remove_keys`
3. `CallbackHandle::execute_requests` upgrades weak ref and gets lock
4. other thread drops last external reference to pool member, the
upgraded weak ref from (3) is now the last strong ref
5. `CallbackHandle::execute_requests` finishes, drops pool member
6. dropping that pool member calls `ResourcePool::unregister_member`
which is the same lock as we got in (1) => deadlock
We now just avoid modifying `members` during shutdown and just hold a
weak ref there. As a side effect, the `last_used` addressable heap moves
around a bit and is no longer `Arc`ed (see updated diagram).
---------
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
fix: avoid recursive locking during LRU shutdown (#8395)
* test: regression test for #8378
* fix: avoid recursive locking during LRU shutdown
Fixes the following construct during shutdown:
1. `clean_up_loop` holds `members` lock
2. calls `member.remove_keys`
3. `CallbackHandle::execute_requests` upgrades weak ref and gets lock
4. other thread drops last external reference to pool member, the
upgraded weak ref from (3) is now the last strong ref
5. `CallbackHandle::execute_requests` finishes, drops pool member
6. dropping that pool member calls `ResourcePool::unregister_member`
which is the same lock as we got in (1) => deadlock
We now just avoid modifying `members` during shutdown and just hold a
weak ref there. As a side effect, the `last_used` addressable heap moves
around a bit and is no longer `Arc`ed (see updated diagram).
---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
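For illustration only: a minimal, hypothetical Rust sketch of the weak-reference pattern described above. The `Pool` and `Member` names are invented for this sketch and are not the actual `cache_system` types; the point is that clean-up snapshots strong references and releases the `members` lock before any member can be dropped, so a member's `Drop` can never re-enter the lock taken in step (1).
use std::sync::{Arc, Mutex, Weak};
/// Simplified stand-in for the shared pool state; `members` now stores weak refs.
struct Pool {
    members: Mutex<Vec<Weak<Member>>>,
}
/// Simplified stand-in for a pool member whose Drop needs pool bookkeeping.
struct Member {
    name: &'static str,
}
impl Pool {
    /// Fixed clean-up: snapshot strong refs under the lock, then release the
    /// lock BEFORE doing any work that might drop the last strong reference.
    fn clean_up(&self) {
        let strong: Vec<Arc<Member>> = {
            let mut guard = self.members.lock().unwrap();
            guard.retain(|w| w.strong_count() > 0); // lazily prune dead entries
            guard.iter().filter_map(|w| w.upgrade()).collect()
        }; // `members` lock released here
        for m in strong {
            println!("evicting keys from {}", m.name);
            // If this was the last strong ref, `m` is dropped here, outside the
            // `members` lock, so a member's Drop can never re-enter that lock.
        }
    }
}
fn main() {
    let pool = Pool {
        members: Mutex::new(Vec::new()),
    };
    let member = Arc::new(Member { name: "lru-backend" });
    pool.members.lock().unwrap().push(Arc::downgrade(&member));
    // Another thread may drop its strong reference at any time; clean_up stays
    // deadlock-free because the last strong ref can only be dropped while the
    // `members` lock is NOT held.
    drop(member);
    pool.clean_up();
}
Pruning dead weak references lazily inside the clean-up mirrors how `register_member` in the diff below tolerates entries whose strong count has already dropped to zero.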
|
diff --git a/cache_system/src/backend/policy/lru.rs b/cache_system/src/backend/policy/lru.rs
index 226205bec5..eab2359485 100644
--- a/cache_system/src/backend/policy/lru.rs
+++ b/cache_system/src/backend/policy/lru.rs
@@ -190,14 +190,16 @@
//! | | |
//! V (mutex) |
//! .~~~~~~~~~~~~~~~. .~~~~~~~~~~~. | .~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~.
-//! -->: PolicyBackend :--->: LruPolicy : (arc) : PoolMemberImpl : : PoolMember :
+//! -->: PolicyBackend :--->: LruPolicy : | : PoolMemberImpl : : PoolMember :
//! : <K, V> : : <K, V, S> : | : <K, V, S> : : <S> :
-//! : : : :------+------: :<--(dyn)---: :
+//! : : : : +------: :<--(dyn)---: :
//! .~~~~~~~~~~~~~~~. .~~~~~~~~~~~. .~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~.
-//! | ^
-//! (arc) |
-//! | |
-//! V |
+//! | | ^ ^
+//! | | | |
+//! | +--------------------------------------(arc)-----+ |
+//! (arc) |
+//! | (weak)
+//! V |
//! .~~~~~~~~~~~~~~. .~~~~~~~~~~~~~.
//! ---------------------->: ResourcePool :-----+-------(arc)--------------------->: SharedState :
//! : <S> : | : <S> :
@@ -300,7 +302,7 @@ use std::{
collections::{btree_map::Entry, BTreeMap},
fmt::Debug,
hash::Hash,
- sync::Arc,
+ sync::{Arc, Weak},
};
use iox_time::Time;
@@ -362,12 +364,35 @@ where
current: Mutex<MeasuredT<S>>,
/// Members (= backends) that use this pool.
- members: Mutex<BTreeMap<&'static str, Box<dyn PoolMember<S = S>>>>,
+ members: Mutex<BTreeMap<&'static str, Weak<dyn PoolMember<S = S>>>>,
/// Notification when [`current`](Self::current) as changed.
change_notify: Notify,
}
+impl<S> SharedState<S>
+where
+ S: Resource,
+{
+ /// Get current members.
+ ///
+ /// This also performs a clean-up.
+ fn members(&self) -> BTreeMap<&'static str, Arc<dyn PoolMember<S = S>>> {
+ let mut members = self.members.lock();
+ let mut out = BTreeMap::new();
+
+ members.retain(|id, member| match member.upgrade() {
+ Some(member) => {
+ out.insert(*id, member);
+ true
+ }
+ None => false,
+ });
+
+ out
+ }
+}
+
/// Resource pool.
///
/// This can be used with [`LruPolicy`].
@@ -455,29 +480,23 @@ where
///
/// # Panic
/// Panics when a member with the specific ID is already registered.
- fn register_member(&self, id: &'static str, member: Box<dyn PoolMember<S = S>>) {
+ fn register_member(&self, id: &'static str, member: Weak<dyn PoolMember<S = S>>) {
let mut members = self.shared.members.lock();
match members.entry(id) {
Entry::Vacant(v) => {
v.insert(member);
}
- Entry::Occupied(o) => {
- panic!("Member '{}' already registered", o.key());
+ Entry::Occupied(mut o) => {
+ if o.get().strong_count() > 0 {
+ panic!("Member '{}' already registered", o.key());
+ } else {
+ *o.get_mut() = member;
+ }
}
}
}
- /// Unregister pool member.
- ///
- /// # Panic
- /// Panics when the member with the specified ID is unknown (or was already unregistered).
- fn unregister_member(&self, id: &str) {
- let mut members = self.shared.members.lock();
-
- assert!(members.remove(id).is_some(), "Member '{id}' unknown");
- }
-
/// Add used resource from pool.
fn add(&self, s: S) {
let mut current = self.shared.current.lock();
@@ -518,21 +537,15 @@ where
V: Clone + Debug + Send + 'static,
S: Resource,
{
- /// Pool member ID.
- id: &'static str,
-
/// Link to central resource pool.
pool: Arc<ResourcePool<S>>,
+ /// Pool member
+ member: Arc<PoolMemberImpl<K, V, S>>,
+
/// Resource estimator that is used for new (via [`SET`](Subscriber::set)) entries.
resource_estimator: Arc<dyn ResourceEstimator<K = K, V = V, S = S>>,
- /// Tracks when an element was used last.
- ///
- /// This is shared with [`PoolMemberImpl`], because [`clean_up_loop`] uses it via [`PoolMember::could_remove`] to
- /// select victims for LRU evictions.
- last_used: Arc<Mutex<AddressableHeap<K, S, Time>>>,
-
/// Count number of elements within this specific pool member.
metric_count: U64Gauge,
@@ -584,23 +597,19 @@ where
move |mut callback_handle| {
callback_handle.execute_requests(vec![ChangeRequest::ensure_empty()]);
- let last_used = Arc::new(Mutex::new(AddressableHeap::new()));
-
- pool.register_member(
+ let member = Arc::new(PoolMemberImpl {
id,
- Box::new(PoolMemberImpl {
- id,
- last_used: Arc::clone(&last_used),
- metric_evicted,
- callback_handle: Mutex::new(callback_handle),
- }),
- );
+ last_used: Mutex::new(AddressableHeap::new()),
+ metric_evicted,
+ callback_handle: Mutex::new(callback_handle),
+ });
+
+ pool.register_member(id, Arc::downgrade(&member) as _);
Self {
- id,
pool,
+ member,
resource_estimator,
- last_used,
metric_count,
metric_usage,
}
@@ -615,7 +624,15 @@ where
S: Resource,
{
fn drop(&mut self) {
- self.pool.unregister_member(self.id);
+ let size_total = {
+ let mut guard = self.member.last_used.lock();
+ let mut accu = S::zero();
+ while let Some((_k, s, _t)) = guard.pop() {
+ accu = accu + s;
+ }
+ accu
+ };
+ self.pool.remove(size_total);
}
}
@@ -630,7 +647,7 @@ where
fn get(&mut self, k: &Self::K, now: Time) -> Vec<ChangeRequest<'static, Self::K, Self::V>> {
trace!(?k, now = now.timestamp(), "LRU get",);
- let mut last_used = self.last_used.lock();
+ let mut last_used = self.member.last_used.lock();
// update "last used"
last_used.update_order(k, now);
@@ -659,7 +676,7 @@ where
}
{
- let mut last_used = self.last_used.lock();
+ let mut last_used = self.member.last_used.lock();
// maybe clean from pool
if let Some((consumption, last_used_t_previously)) = last_used.remove(k) {
@@ -686,7 +703,7 @@ where
fn remove(&mut self, k: &Self::K, now: Time) -> Vec<ChangeRequest<'static, Self::K, Self::V>> {
trace!(?k, now = now.timestamp(), "LRU remove",);
- let mut last_used = self.last_used.lock();
+ let mut last_used = self.member.last_used.lock();
if let Some((consumption, _last_used)) = last_used.remove(k) {
self.pool.remove(consumption);
@@ -702,7 +719,7 @@ where
///
/// The only implementation of this is [`PoolMemberImpl`]. This indirection is required to erase `K` and `V` from specific
/// backend so we can stick it into the generic pool.
-trait PoolMember: Debug + Send + 'static {
+trait PoolMember: Debug + Send + Sync + 'static {
/// Resource type.
type S;
@@ -739,7 +756,7 @@ where
/// Tracks usage of the last used elements.
///
/// See documentation of [`callback_handle`](Self::callback_handle) for a reasoning about locking.
- last_used: Arc<Mutex<AddressableHeap<K, S, Time>>>,
+ last_used: Mutex<AddressableHeap<K, S, Time>>,
/// Handle to call back into the [`PolicyBackend`] to evict data.
///
@@ -839,10 +856,9 @@ async fn clean_up_loop<S>(
}
}
- // hold member lock
- // this is OK since this is only modified when new members are added. The members itself do NOT interact with
- // this value.
- let members = shared.members.lock();
+ // receive members
+ // Do NOT hold the member lock during the deletion later because this can lead to deadlocks during shutdown.
+ let members = shared.members();
// select victims
let mut victims: BTreeMap<&'static str, Vec<Box<dyn Any>>> = Default::default();
@@ -866,9 +882,7 @@ async fn clean_up_loop<S>(
}
for (id, keys) in victims {
- let member = members
- .get(id)
- .expect("did not drop the lock in the meantime");
+ let member = members.get(id).expect("did get this ID from this map");
member.remove_keys(keys);
}
}
@@ -956,15 +970,21 @@ mod tests {
"id",
Arc::clone(&resource_estimator) as _,
));
+ backend1.set(String::from("a"), 1usize);
+ assert_eq!(pool.current(), TestSize(1));
// drop the backend so re-registering the same ID ("id") MUST NOT panic
drop(backend1);
+ assert_eq!(pool.current(), TestSize(0));
+
let mut backend2 = PolicyBackend::hashmap_backed(time_provider);
backend2.add_policy(LruPolicy::new(
Arc::clone(&pool),
"id",
Arc::clone(&resource_estimator) as _,
));
+ backend2.set(String::from("a"), 2usize);
+ assert_eq!(pool.current(), TestSize(2));
}
#[tokio::test]
@@ -1606,9 +1626,18 @@ mod tests {
});
}
- /// Regression test for <https://github.com/influxdata/influxdb_iox/issues/8334>.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_deadlock() {
+ // Regression test for <https://github.com/influxdata/influxdb_iox/issues/8334>.
+ test_deadlock_inner(Duration::from_secs(1)).await;
+
+ // Regression test for <https://github.com/influxdata/influxdb_iox/issues/8378>
+ for _ in 0..100 {
+ test_deadlock_inner(Duration::from_millis(1)).await;
+ }
+ }
+
+ async fn test_deadlock_inner(test_duration: Duration) {
#[derive(Debug)]
struct OneSizeProvider {}
@@ -1662,7 +1691,7 @@ mod tests {
}
});
- tokio::time::sleep(Duration::from_secs(1)).await;
+ tokio::time::sleep(test_duration).await;
worker1.abort();
worker2.abort();
|
aa7a38be5555a2d67dffc7bfb12415c8de8c0f88
|
Marco Neumann
|
2023-07-31 15:04:34
|
re-design LRU cache to be deadlock-free (#8345)
|
* fix: re-design LRU cache to be deadlock-free
Fixes #8334.
* test: explain test
* test: add regression test
* docs: extend "overdelete" section
| null |
fix: re-design LRU cache to be deadlock-free (#8345)
* fix: re-design LRU cache to be deadlock-free
Fixes #8334.
* test: explain test
* test: add regression test
* docs: extend "overdelete" section
|
diff --git a/Cargo.lock b/Cargo.lock
index b46fa0962e..c523d7a50f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -763,6 +763,7 @@ dependencies = [
"pdatastructs",
"proptest",
"rand",
+ "test_helpers",
"tokio",
"tokio-util",
"trace",
diff --git a/cache_system/Cargo.toml b/cache_system/Cargo.toml
index ce651cffb9..d2935a8447 100644
--- a/cache_system/Cargo.toml
+++ b/cache_system/Cargo.toml
@@ -23,6 +23,7 @@ workspace-hack = { version = "0.1", path = "../workspace-hack" }
[dev-dependencies]
criterion = { version = "0.5", default-features = false, features = ["rayon"]}
proptest = { version = "1", default_features = false, features = ["std"] }
+test_helpers = { path = "../test_helpers" }
[lib]
# Allow --save-baseline to work
diff --git a/cache_system/src/backend/policy/integration_tests.rs b/cache_system/src/backend/policy/integration_tests.rs
index a303c23fbe..c99a2d06ba 100644
--- a/cache_system/src/backend/policy/integration_tests.rs
+++ b/cache_system/src/backend/policy/integration_tests.rs
@@ -5,6 +5,7 @@ use std::{collections::HashMap, sync::Arc, time::Duration};
use iox_time::{MockProvider, Time};
use parking_lot::Mutex;
use rand::rngs::mock::StepRng;
+use test_helpers::maybe_start_logging;
use tokio::{runtime::Handle, sync::Notify};
use crate::{
@@ -116,6 +117,7 @@ async fn test_refresh_does_not_update_lru_time() {
time_provider,
loader,
notify_idle,
+ pool,
..
} = TestStateLruAndRefresh::new();
@@ -135,12 +137,14 @@ async fn test_refresh_does_not_update_lru_time() {
let barrier = loader.block_next(1, String::from("foo"));
backend.set(1, String::from("a"));
+ pool.wait_converged().await;
// trigger refresh
time_provider.inc(Duration::from_secs(1));
time_provider.inc(Duration::from_secs(1));
backend.set(2, String::from("b"));
+ pool.wait_converged().await;
time_provider.inc(Duration::from_secs(1));
@@ -150,6 +154,7 @@ async fn test_refresh_does_not_update_lru_time() {
// add a third item to the cache, forcing LRU to evict one of the items
backend.set(3, String::from("c"));
+ pool.wait_converged().await;
// Should evict `1` even though it was refreshed after `2` was added
assert_eq!(backend.get(&1), None);
@@ -192,6 +197,8 @@ async fn test_if_refresh_to_slow_then_expire() {
#[tokio::test]
async fn test_refresh_can_trigger_lru_eviction() {
+ maybe_start_logging();
+
let TestStateLRUAndRefresh {
mut backend,
refresh_duration_provider,
@@ -224,13 +231,16 @@ async fn test_refresh_can_trigger_lru_eviction() {
backend.set(1, String::from("a"));
backend.set(2, String::from("c"));
backend.set(3, String::from("d"));
- assert_eq!(backend.get(&1), Some(String::from("a")));
+ pool.wait_converged().await;
assert_eq!(backend.get(&2), Some(String::from("c")));
assert_eq!(backend.get(&3), Some(String::from("d")));
+ time_provider.inc(Duration::from_millis(1));
+ assert_eq!(backend.get(&1), Some(String::from("a")));
// refresh
- time_provider.inc(Duration::from_secs(1));
+ time_provider.inc(Duration::from_secs(10));
notify_idle.notified_with_timeout().await;
+ pool.wait_converged().await;
// needed to evict 2->"c"
assert_eq!(backend.get(&1), Some(String::from("b")));
@@ -285,6 +295,7 @@ async fn test_remove_if_check_does_not_extend_lifetime() {
size_estimator,
time_provider,
remove_if_handle,
+ pool,
..
} = TestStateLruAndRemoveIf::new().await;
@@ -293,15 +304,18 @@ async fn test_remove_if_check_does_not_extend_lifetime() {
size_estimator.mock_size(3, String::from("c"), TestSize(4));
backend.set(1, String::from("a"));
+ pool.wait_converged().await;
time_provider.inc(Duration::from_secs(1));
backend.set(2, String::from("b"));
+ pool.wait_converged().await;
time_provider.inc(Duration::from_secs(1));
// Checking remove_if should not count as a "use" of 1
// for the "least recently used" calculation
remove_if_handle.remove_if(&1, |_| false);
backend.set(3, String::from("c"));
+ pool.wait_converged().await;
// adding "c" totals 12 size, but backend has room for only 10
// so "least recently used" (in this case 1, not 2) should be removed
@@ -397,6 +411,7 @@ impl TestStateLRUAndRefresh {
"my_pool",
TestSize(10),
Arc::clone(&metric_registry),
+ &Handle::current(),
));
backend.add_policy(LruPolicy::new(
Arc::clone(&pool),
@@ -442,6 +457,7 @@ impl TestStateTtlAndLRU {
"my_pool",
TestSize(10),
Arc::clone(&metric_registry),
+ &Handle::current(),
));
backend.add_policy(LruPolicy::new(
Arc::clone(&pool),
@@ -465,6 +481,7 @@ struct TestStateLruAndRemoveIf {
time_provider: Arc<MockProvider>,
size_estimator: Arc<TestSizeEstimator>,
remove_if_handle: RemoveIfHandle<u8, String>,
+ pool: Arc<ResourcePool<TestSize>>,
}
impl TestStateLruAndRemoveIf {
@@ -479,6 +496,7 @@ impl TestStateLruAndRemoveIf {
"my_pool",
TestSize(10),
Arc::clone(&metric_registry),
+ &Handle::current(),
));
backend.add_policy(LruPolicy::new(
Arc::clone(&pool),
@@ -495,6 +513,7 @@ impl TestStateLruAndRemoveIf {
time_provider,
size_estimator,
remove_if_handle,
+ pool,
}
}
}
@@ -507,6 +526,7 @@ struct TestStateLruAndRefresh {
time_provider: Arc<MockProvider>,
loader: Arc<TestLoader<u8, (), String>>,
notify_idle: Arc<Notify>,
+ pool: Arc<ResourcePool<TestSize>>,
}
impl TestStateLruAndRefresh {
@@ -537,6 +557,7 @@ impl TestStateLruAndRefresh {
"my_pool",
TestSize(10),
Arc::clone(&metric_registry),
+ &Handle::current(),
));
backend.add_policy(LruPolicy::new(
Arc::clone(&pool),
@@ -551,6 +572,7 @@ impl TestStateLruAndRefresh {
time_provider,
loader,
notify_idle,
+ pool,
}
}
}
diff --git a/cache_system/src/backend/policy/lru.rs b/cache_system/src/backend/policy/lru.rs
index 6d9d88e8d5..226205bec5 100644
--- a/cache_system/src/backend/policy/lru.rs
+++ b/cache_system/src/backend/policy/lru.rs
@@ -3,6 +3,7 @@
//! # Usage
//!
//! ```
+//! # tokio::runtime::Runtime::new().unwrap().block_on(async {
//! use std::{
//! collections::HashMap,
//! ops::{Add, Sub},
@@ -19,6 +20,7 @@
//! },
//! resource_consumption::{Resource, ResourceEstimator},
//! };
+//! use tokio::runtime::Handle;
//!
//! // first we implement a strongly-typed RAM size measurement
//! #[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
@@ -68,6 +70,7 @@
//! "my_pool",
//! limit,
//! metric_registry,
+//! &Handle::current(),
//! ));
//!
//! // set up first pool user: a u64->String map
@@ -106,6 +109,11 @@
//!
//! // fill up pool
//! backend1.set(3, String::from("this_will_evict_data"));
+//!
+//! // the policy will eventually evict the data, in tests we can use a help
+//! // method to wait for that
+//! pool.wait_converged().await;
+//!
//! assert!(backend1.get(&1).is_some());
//! assert!(backend1.get(&2).is_none());
//! assert!(backend1.get(&3).is_some());
@@ -139,11 +147,13 @@
//!
//! // eviction works for all pool members
//! backend2.set(1, vec![1, 2, 3, 4]);
+//! pool.wait_converged().await;
//! assert!(backend1.get(&1).is_none());
//! assert!(backend1.get(&2).is_none());
//! assert!(backend1.get(&3).is_some());
//! assert!(backend2.get(&1).is_some());
//! assert_eq!(pool.current(), RamSize(33));
+//! # });
//! ```
//!
//! # Internals
@@ -154,8 +164,8 @@
//!
//! - **Single Pool:** Have a single resource pool for multiple LRU backends.
//! - **Eviction Cascade:** Adding data to any of the backends (or modifying an existing entry) should check if there is
-//! enough space left in the LRU backend. If not, we must remove the least recently used entries over all backends
-//! (including the one that just got a new entry) until there is enough space.
+//! enough space left in the LRU backend. If not, we must EVENTUALLY remove the least recently used entries over all
+//! backends (including the one that just got a new entry) until there is enough space.
//!
//! This has the following consequences:
//!
@@ -167,49 +177,39 @@
//! ## Data Structures
//!
//! ```text
-//! .~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~~~.
-//! ------------->: ResourcePool :--(mutex)-->: ResourcePoolInner :-------------------------------+
-//! : <S> : : <S> : |
-//! .~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~~~. |
-//! ^ |
-//! | |
-//! (arc) |
-//! | |
-//! | |
-//! | .~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~. |
-//! | : LruPolicyInner :<--: PoolMemberGuardImpl :<-(dyn)-: PoolMemberGuard : |
-//! | : <K1, V1, S> : : <K1, V1, S> : : <S> : |
-//! | .~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~. |
-//! | ^ ^ ^ |
-//! | | | | |
-//! | | +-------------+-------------+ |
-//! | | (call lock) |
-//! | | +-------------+-------------+ |
-//! | (mutex) | | |
-//! .~~~~~~~~~~~~~. | | .~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~. |
-//! ->: LruPolicy :-+ (arc) : PoolMemberImpl : : PoolMember :<---+
-//! : <K1, V1, S> : | | : <K1, V1, S> : : <S> : |
-//! : :----------+-------------------: :<--(dyn)---: : |
-//! .~~~~~~~~~~~~~. | .~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~. |
-//! | |
-//! | |
-//! | |
-//! | |
-//! | .~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~. |
-//! | : LruPolicyInner :<--: PoolMemberGuardImpl :<-(dyn)-: PoolMemberGuard : |
-//! | : <K2, V2, S> : : <K2, V2, S> : : <S> : |
-//! | .~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~. |
-//! | ^ ^ ^ |
-//! | | | | |
-//! | | +-------------+-------------+ |
-//! | | (call lock) |
-//! | | +-------------+-------------+ |
-//! | (mutex) | | |
-//! .~~~~~~~~~~~~~. | | .~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~. |
-//! ->: LruPolicy :-+ (arc) : PoolMemberImpl : : PoolMember :<---+
-//! : <K2, V2, S> : | : <K2, V2, S> : : <S> :
-//! : :----------+-------------------: :<--(dyn)---: :
-//! .~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~.
+//! .~~~~~~~~~~~~~~~~.
+//! +---------------------------------------: CallbackHandle :
+//! | : <K, V> :
+//! | .~~~~~~~~~~~~~~~~.
+//! | ^
+//! | .~~~~~~~~~~~~~~~~~. |
+//! | : AddressableHeap : |
+//! | : <K, S, Time> : (mutex)
+//! | .~~~~~~~~~~~~~~~~~. |
+//! | ^ |
+//! | | |
+//! V (mutex) |
+//! .~~~~~~~~~~~~~~~. .~~~~~~~~~~~. | .~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~.
+//! -->: PolicyBackend :--->: LruPolicy : (arc) : PoolMemberImpl : : PoolMember :
+//! : <K, V> : : <K, V, S> : | : <K, V, S> : : <S> :
+//! : : : :------+------: :<--(dyn)---: :
+//! .~~~~~~~~~~~~~~~. .~~~~~~~~~~~. .~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~.
+//! | ^
+//! (arc) |
+//! | |
+//! V |
+//! .~~~~~~~~~~~~~~. .~~~~~~~~~~~~~.
+//! ---------------------->: ResourcePool :-----+-------(arc)--------------------->: SharedState :
+//! : <S> : | : <S> :
+//! .~~~~~~~~~~~~~~. | .~~~~~~~~~~~~~.
+//! | |
+//! (handle) |
+//! | |
+//! V |
+//! .~~~~~~~~~~~~~~~. |
+//! : clean_up_loop :----+
+//! : <S> :
+//! .~~~~~~~~~~~~~~~.
//! ```
//!
//! ## State
@@ -229,125 +229,129 @@
//!
//! ### Get
//! For [`GET`] we only need to update the "last used" timestamp for the affected entry. No
-//! pool-wide operations are required. We just [`LruPolicyInner`] and perform the read operation of the inner backend
-//! and the modification of the "last used" timestamp.
+//! pool-wide operations are required. We update [`AddressableHeap`] and then perform the read operation of the inner
+//! backend.
//!
//! ### Remove
//! For [`REMOVE`] the pool usage can only decrease, so other backends are never affected. We
-//! first lock [`LruPolicyInner`] and check if the entry is present. If it is, we also lock [`ResourcePoolInner`]
-//! and then perform the modification on both.
+//! first lock [`AddressableHeap`] and check if the entry is present. If it is, we also lock the "current" counter in
+//! [`SharedState`] and then perform the modification on both.
//!
//! ### Set
-//! [`SET`] is the most complex operation and requires a bit of a lock dance:
+//! [`SET`] locks [`AddressableHeap`] to figure out if the item exists. If it does, it locks the "current" counter in
+//! [`SharedState`] and removes the old value. Then it updates [`AddressableHeap`] with the new value and locks & updates
+//! the "current" counter in [`SharedState`] again. It then notifies the clean-up loop that there was an update.
+//!
+//! Note that in case of an override, the existing "last used" time will be used instead of "now", because just
+//! replacing an existing value (e.g. via a [refresh]) should not count as a use.
+//!
+//! ### Clean-up Loop
+//! This is the beefy bit. First it locks and reads the "current" counter in [`SharedState`]. It instantly unlocks the
+//! value to not block all pool members adding new values while we figure out what to evict. Then it selects victims
+//! one by one by asking the individual pool members what they could remove. This shortly locks their
+//! [`AddressableHeap`]s (one member at a time). After enough victims were selected for eviction, it will delete
+//! them, one pool member at a time. Each pool member will lock their [`CallbackHandle`] and when the deletion happens
+//! also their [`AddressableHeap`] and the "current" counter in [`SharedState`]. However, the lock order is identical to
+//! a normal "remove" operation.
+//!
+//! Note that the clean up loop does not directly update the "current" counter in [`SharedState`] since the "remove"
+//! routine already does that.
+//!
+//! ## Consistency
+//! This system is eventually consistent and we are a bit loose at a few places to make it more efficient and easier to
+//! implement. This subsection explains cases where this could be visible to an observer.
+//!
+//! ### Overcommit
+//! Since we add new data to the cache pool and the clean-up loop will eventually evict data, we overcommit the pool for
+//! a short time. In practice however we already allocated the memory before adding it to the pool.
+//!
+//! There is another risk: that the cache users will add data so fast that the clean-up loop cannot keep up. This
+//! however is highly unlikely, since the loop selects enough victims to get the resource usage below the limit and
+//! deletes these victims in batches. The more it runs behind, the larger the batch will be.
//!
-//! 0. Lock [`PolicyBackend`] internals of the "source" of the set operation. This is an indirect operation.
-//! 1. Lock [`ResourcePoolInner`]
-//! 2. Lock "source" [`LruPolicyInner`]
-//! 3. Check if the entry already exists and remove it.
-//! 4. Drop lock of "source" [`LruPolicyInner`] so that the pool can use it to free up space.
-//! 5. Request to add more data to the pool:
-//! 1. Check if we need to free up space, otherwise we can already proceed to step 6.
-//! 2. Lock all pool members ([`PoolMember::lock`] which ultimately locks [`LruPolicyInner`])
-//! 3. Loop:
-//! 1. Ask pool members if they have anything to free.
-//! 2. Pick least recently used result and create (but not execute) [`ChangeRequest`] that would free it.
-//! 4. For all members that are NOT the source of the operation: Bundle collected [`ChangeRequest`]s into one per
-//! member, pre-pended with a lock drop. This gives:
-//! 1. Lock [`PolicyBackend`]
-//! 2. Drop lock of [`LruPolicyInner`].
-//! 3. Perform "remove" changes. (This will again acquire a lock on [`LruPolicyInner`] but does NOT result in
-//! a lock-gap!)
-//! 5. Drop lock of [`LruPolicyInner`] on "source" member
-//! 6. Lock "source" [`LruPolicyInner`]
-//! 7. Perform bookeeping changes for account for new member.
-//! 8. Drop lock of "source" [`LruPolicyInner`] and [`ResourcePoolInner`]
-//! 9. Let "source" [`PolicyBackend`] play out its change requests.
-//! 10. Drop internal [`PolicyBackend`] lock.
+//! ### Overdelete
+//! Similar to "overcommit", it is possible that the clean-up loop deletes more items than necessary. This can happen
+//! when between victim selection and actual deletion, entries are removed from the cache (e.g. via [TTL]). However the
+//! timing for that is very tight and we would have deleted the data anyways if the delete would have happened a tiny
+//! bit later, so in reality this is not a concern. On the other hand, the effect might also be a cache miss that was
+//! not strictly necessary and in turn worse performance than we could have had.
//!
-//! The global locks in step 5.2 are required so that the reads in step 5.3.1 and the resulting actions in step 5.3.2
-//! and step 5.4.3 are consistent. Otherwise an interleaved `get` request might invalidate the results.
+//! ### Victim-Use-Delete
+//! It is possible that a key is used between victim selection and its removal. In theory we should not remove the key
+//! in this case because it's no longer "least recently used". However, if the key usage would have occurred only a bit
+//! later, we would have removed the key anyways so this tight race has no practical meaning. No user can rely on such
+//! tight timings and the fullness of a cache pool.
+//!
+//! ### Victim-Downsize-Delete
+//! A selected victim might be replaced with a smaller one between victim selection and its deletion. In this case, the
+//! clean-up loop does not delete enough data in its current try but needs an additional iteration. In reality this is
+//! very unlikely since most cached entries rarely shrink and even if they do, the clean-up loop will eventually catch
+//! up again.
//!
//!
//! [`GET`]: Subscriber::get
-//! [`SET`]: Subscriber::set
-//! [`REMOVE`]: Subscriber::remove
//! [`PolicyBackend`]: super::PolicyBackend
+//! [refresh]: super::refresh
+//! [`REMOVE`]: Subscriber::remove
+//! [`SET`]: Subscriber::set
+//! [TTL]: super::ttl
use std::{
any::Any,
collections::{btree_map::Entry, BTreeMap},
fmt::Debug,
hash::Hash,
- marker::PhantomData,
sync::Arc,
};
use iox_time::Time;
use metric::{U64Counter, U64Gauge};
-use parking_lot::{Mutex, MutexGuard};
+use observability_deps::tracing::trace;
+use parking_lot::Mutex;
+use tokio::{runtime::Handle, sync::Notify, task::JoinSet};
use crate::{
addressable_heap::AddressableHeap,
+ backend::CacheBackend,
resource_consumption::{Resource, ResourceEstimator},
};
use super::{CallbackHandle, ChangeRequest, Subscriber};
-#[derive(Debug)]
/// Wrapper around something that can be converted into `u64`
/// to enable emitting metrics.
-struct MeasuredT<T> {
- v: T,
+#[derive(Debug)]
+struct MeasuredT<S>
+where
+ S: Resource,
+{
+ v: S,
metric: U64Gauge,
}
-impl<T> MeasuredT<T> {
- fn new(v: T, metric: U64Gauge) -> Self
- where
- T: Copy + Into<u64>,
- {
+impl<S> MeasuredT<S>
+where
+ S: Resource,
+{
+ fn new(v: S, metric: U64Gauge) -> Self {
metric.set(v.into());
Self { v, metric }
}
- fn inc(&mut self, delta: &T)
- where
- T: std::ops::Add<Output = T> + Copy + Into<u64>,
- {
+ fn inc(&mut self, delta: &S) {
self.v = self.v + *delta;
self.metric.inc((*delta).into());
}
- fn dec(&mut self, delta: &T)
- where
- T: std::ops::Sub<Output = T> + Copy + Into<u64>,
- {
+ fn dec(&mut self, delta: &S) {
self.v = self.v - *delta;
self.metric.dec((*delta).into());
}
}
-impl<T> PartialEq for MeasuredT<T>
-where
- T: PartialEq,
-{
- fn eq(&self, other: &Self) -> bool {
- self.v == other.v
- }
-}
-
-impl<T> PartialOrd for MeasuredT<T>
-where
- T: PartialOrd,
-{
- fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
- self.v.partial_cmp(&other.v)
- }
-}
-
-/// Inner state of [`ResourcePool`] which is always behind a mutex.
+/// Shared state between [`ResourcePool`] and [`clean_up_loop`].
#[derive(Debug)]
-struct ResourcePoolInner<S>
+struct SharedState<S>
where
S: Resource,
{
@@ -355,23 +359,57 @@ where
limit: MeasuredT<S>,
/// Current resource usage.
- current: MeasuredT<S>,
+ current: Mutex<MeasuredT<S>>,
/// Members (= backends) that use this pool.
- members: BTreeMap<&'static str, Box<dyn PoolMember<S = S>>>,
+ members: Mutex<BTreeMap<&'static str, Box<dyn PoolMember<S = S>>>>,
+
+ /// Notification when [`current`](Self::current) as changed.
+ change_notify: Notify,
}
-impl<S> ResourcePoolInner<S>
+/// Resource pool.
+///
+/// This can be used with [`LruPolicy`].
+#[derive(Debug)]
+pub struct ResourcePool<S>
where
S: Resource,
{
- /// Create new, empty pool.
- fn new(limit: S, pool_name: &'static str, metric_registry: &metric::Registry) -> Self {
- let current = S::zero();
+ /// Name of the pool.
+ name: &'static str,
+
+ /// Shared state.
+ shared: Arc<SharedState<S>>,
+
+ /// Metric registry associated with the pool.
+ ///
+ /// This is used to generate member-specific metrics as well.
+ metric_registry: Arc<metric::Registry>,
+
+ /// Background task.
+ _background_task: JoinSet<()>,
+
+ /// Notification when the background worker is idle, so tests know that the state has converged and that they can
+ /// continue working.
+ #[allow(dead_code)]
+ notify_idle_test_side: tokio::sync::mpsc::UnboundedSender<tokio::sync::oneshot::Sender<()>>,
+}
+impl<S> ResourcePool<S>
+where
+ S: Resource,
+{
+ /// Creates new empty resource pool with given limit.
+ pub fn new(
+ name: &'static str,
+ limit: S,
+ metric_registry: Arc<metric::Registry>,
+ runtime_handle: &Handle,
+ ) -> Self {
let metric_limit = metric_registry
.register_metric::<U64Gauge>("cache_lru_pool_limit", "Limit of the LRU resource pool")
- .recorder(&[("unit", S::unit()), ("pool", pool_name)]);
+ .recorder(&[("unit", S::unit()), ("pool", name)]);
let limit = MeasuredT::new(limit, metric_limit);
let metric_current = metric_registry
@@ -379,22 +417,48 @@ where
"cache_lru_pool_usage",
"Current consumption of the LRU resource pool",
)
- .recorder(&[("unit", S::unit()), ("pool", pool_name)]);
- let current = MeasuredT::new(current, metric_current);
+ .recorder(&[("unit", S::unit()), ("pool", name)]);
+ let current = Mutex::new(MeasuredT::new(S::zero(), metric_current));
- Self {
+ let shared = Arc::new(SharedState {
limit,
current,
- members: BTreeMap::new(),
+ members: Default::default(),
+ change_notify: Default::default(),
+ });
+
+ let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
+
+ let mut background_task = JoinSet::new();
+ background_task.spawn_on(clean_up_loop(Arc::clone(&shared), rx), runtime_handle);
+
+ Self {
+ name,
+ shared,
+ metric_registry,
+ _background_task: background_task,
+ notify_idle_test_side: tx,
}
}
+ /// Get pool limit.
+ pub fn limit(&self) -> S {
+ self.shared.limit.v
+ }
+
+ /// Get current pool usage.
+ pub fn current(&self) -> S {
+ self.shared.current.lock().v
+ }
+
/// Register new pool member.
///
/// # Panic
/// Panics when a member with the specific ID is already registered.
- fn register_member(&mut self, id: &'static str, member: Box<dyn PoolMember<S = S>>) {
- match self.members.entry(id) {
+ fn register_member(&self, id: &'static str, member: Box<dyn PoolMember<S = S>>) {
+ let mut members = self.shared.members.lock();
+
+ match members.entry(id) {
Entry::Vacant(v) => {
v.insert(member);
}
@@ -408,118 +472,44 @@ where
///
/// # Panic
/// Panics when the member with the specified ID is unknown (or was already unregistered).
- fn unregister_member(&mut self, id: &str) {
- assert!(self.members.remove(id).is_some(), "Member '{id}' unknown");
- }
-
- /// Add used resource too pool.
- ///
- /// Returns a list of type-erased [`ChangeRequest`]s.
- fn add(&mut self, s: S, source_member_id: &'static str) -> Vec<Box<dyn Any>> {
- self.current.inc(&s);
-
- // collect requests to source member to avoid recursive access to their underlying backend
- let mut requests_to_source = vec![];
-
- if self.current > self.limit {
- // lock all members
- let mut members: Vec<_> = self
- .members
- .iter()
- .map(|(id, member)| (*id, member.lock(), vec![]))
- .collect();
+ fn unregister_member(&self, id: &str) {
+ let mut members = self.shared.members.lock();
- // evict data until we are below the limit
- while self.current > self.limit {
- let mut options: Vec<_> = members
- .iter_mut()
- .filter_map(|(id, member, requests)| {
- member.could_remove().map(|t| (t, member, id, requests))
- })
- .collect();
- options.sort_by_key(|(t, _member, _id, _requests)| *t);
-
- let (_t, member, _id, requests) =
- options.first_mut().expect("accounting out of sync");
- let (s, request) = member.remove_oldest();
-
- self.current.dec(&s);
- requests.push(request);
- }
-
- // submit change requests
- for (id, member, requests) in members {
- if id == source_member_id {
- requests_to_source = requests;
- } else {
- member.execute_requests(requests);
- }
- }
- }
-
- requests_to_source
+ assert!(members.remove(id).is_some(), "Member '{id}' unknown");
}
- /// Remove used resource from pool.
- fn remove(&mut self, s: S) {
- self.current.dec(&s);
- }
-}
-
-/// Resource pool.
-///
-/// This can be used with [`LruPolicy`].
-#[derive(Debug)]
-pub struct ResourcePool<S>
-where
- S: Resource,
-{
- inner: Mutex<ResourcePoolInner<S>>,
- name: &'static str,
- metric_registry: Arc<metric::Registry>,
-}
-
-impl<S> ResourcePool<S>
-where
- S: Resource,
-{
- /// Creates new empty resource pool with given limit.
- pub fn new(name: &'static str, limit: S, metric_registry: Arc<metric::Registry>) -> Self {
- Self {
- inner: Mutex::new(ResourcePoolInner::new(limit, name, &metric_registry)),
- name,
- metric_registry,
+ /// Add used resource from pool.
+ fn add(&self, s: S) {
+ let mut current = self.shared.current.lock();
+ current.inc(&s);
+ if current.v > self.shared.limit.v {
+ self.shared.change_notify.notify_one();
}
}
- /// Get pool limit.
- pub fn limit(&self) -> S {
- self.inner.lock().limit.v
+ /// Remove used resource from pool.
+ fn remove(&self, s: S) {
+ self.shared.current.lock().dec(&s);
}
- /// Get current pool usage.
- pub fn current(&self) -> S {
- self.inner.lock().current.v
+ /// Wait for the pool to converge to a steady state.
+ ///
+ /// This usually means that the background worker that runs the eviction loop is idle.
+ ///
+ /// # Panic
+ /// Panics if the background worker is not idle within 5s or if the worker died.
+ pub async fn wait_converged(&self) {
+ let (tx, rx) = tokio::sync::oneshot::channel();
+ self.notify_idle_test_side
+ .send(tx)
+ .expect("background worker alive");
+ tokio::time::timeout(std::time::Duration::from_secs(5), rx)
+ .await
+ .unwrap()
+ .unwrap();
}
}
-/// Inner state of [`LruPolicy`].
-///
-/// This is used by [`LruPolicy`] directly but also by [`PoolMemberImpl`] to add it to a [`ResourcePool`]/[`ResourcePoolInner`].
-#[derive(Debug)]
-struct LruPolicyInner<K, V, S>
-where
- K: Clone + Eq + Debug + Hash + Ord + Send + 'static,
- V: Clone + Debug + Send + 'static,
- S: Resource,
-{
- last_used: AddressableHeap<K, S, Time>,
- metric_count: U64Gauge,
- metric_usage: U64Gauge,
- metric_evicted: U64Counter,
- _phantom: PhantomData<V>,
-}
-
/// Cache policy that wraps another backend and limits its resource usage.
#[derive(Debug)]
pub struct LruPolicy<K, V, S>
@@ -528,10 +518,26 @@ where
V: Clone + Debug + Send + 'static,
S: Resource,
{
+ /// Pool member ID.
id: &'static str,
- inner: Arc<Mutex<LruPolicyInner<K, V, S>>>,
+
+ /// Link to central resource pool.
pool: Arc<ResourcePool<S>>,
+
+ /// Resource estimator that is used for new (via [`SET`](Subscriber::set)) entries.
resource_estimator: Arc<dyn ResourceEstimator<K = K, V = V, S = S>>,
+
+ /// Tracks when an element was used last.
+ ///
+ /// This is shared with [`PoolMemberImpl`], because [`clean_up_loop`] uses it via [`PoolMember::could_remove`] to
+ /// select victims for LRU evictions.
+ last_used: Arc<Mutex<AddressableHeap<K, S, Time>>>,
+
+ /// Count number of elements within this specific pool member.
+ metric_count: U64Gauge,
+
+ /// Count resource usage of this specific pool member.
+ metric_usage: U64Gauge,
}
impl<K, V, S> LruPolicy<K, V, S>
@@ -578,27 +584,25 @@ where
move |mut callback_handle| {
callback_handle.execute_requests(vec![ChangeRequest::ensure_empty()]);
- let inner = Arc::new(Mutex::new(LruPolicyInner {
- last_used: AddressableHeap::new(),
- metric_count,
- metric_usage,
- metric_evicted,
- _phantom: PhantomData,
- }));
+ let last_used = Arc::new(Mutex::new(AddressableHeap::new()));
- pool.inner.lock().register_member(
+ pool.register_member(
id,
Box::new(PoolMemberImpl {
- inner: Arc::clone(&inner),
+ id,
+ last_used: Arc::clone(&last_used),
+ metric_evicted,
callback_handle: Mutex::new(callback_handle),
}),
);
Self {
id,
- inner,
pool,
resource_estimator,
+ last_used,
+ metric_count,
+ metric_usage,
}
}
}
@@ -611,7 +615,7 @@ where
S: Resource,
{
fn drop(&mut self) {
- self.pool.inner.lock().unregister_member(self.id);
+ self.pool.unregister_member(self.id);
}
}
@@ -625,10 +629,11 @@ where
type V = V;
fn get(&mut self, k: &Self::K, now: Time) -> Vec<ChangeRequest<'static, Self::K, Self::V>> {
- let mut inner = self.inner.lock();
+ trace!(?k, now = now.timestamp(), "LRU get",);
+ let mut last_used = self.last_used.lock();
// update "last used"
- inner.last_used.update_order(k, now);
+ last_used.update_order(k, now);
vec![]
}
@@ -639,65 +644,61 @@ where
v: &Self::V,
now: Time,
) -> Vec<ChangeRequest<'static, Self::K, Self::V>> {
+ trace!(?k, now = now.timestamp(), "LRU set",);
+
// determine all attributes before getting any locks
let consumption = self.resource_estimator.consumption(k, v);
// "last used" time for new entry
// Note: this might be updated if the entry already exists
- let mut last_used = now;
-
- // get locks
- let mut pool = self.pool.inner.lock();
+ let mut last_used_t = now;
// check for oversized entries
- if consumption > pool.limit.v {
+ if consumption > self.pool.shared.limit.v {
return vec![ChangeRequest::remove(k.clone())];
}
- // maybe clean from pool
{
- let mut inner = self.inner.lock();
- if let Some((consumption, last_used_previously)) = inner.last_used.remove(k) {
- pool.remove(consumption);
- inner.metric_count.dec(1);
- inner.metric_usage.dec(consumption.into());
- last_used = last_used_previously;
+ let mut last_used = self.last_used.lock();
+
+ // maybe clean from pool
+ if let Some((consumption, last_used_t_previously)) = last_used.remove(k) {
+ self.pool.remove(consumption);
+ self.metric_count.dec(1);
+ self.metric_usage.dec(consumption.into());
+ last_used_t = last_used_t_previously;
}
+
+ // add new entry to inner backend BEFORE adding it to the pool, because the we can overcommit for a short
+ // time and we want to give the pool a chance to also evict the new resource
+ last_used.insert(k.clone(), consumption, last_used_t);
+ self.metric_count.inc(1);
+ self.metric_usage.inc(consumption.into());
}
// pool-wide operation
- // Since this may call back to this very backend to remove entries, we MUST NOT hold an inner lock at this
- // point.
- let change_requests = pool.add(consumption, self.id);
-
- // add new entry to inner backend AFTER adding it to the pool, so we are never overcommitting resources.
- let mut inner = self.inner.lock();
- inner.last_used.insert(k.clone(), consumption, last_used);
- inner.metric_count.inc(1);
- inner.metric_usage.inc(consumption.into());
+ // Since this may wake-up the background worker and cause evictions, drop the `last_used` lock before doing this (see
+ // block above) to avoid lock contention.
+ self.pool.add(consumption);
- downcast_change_requests(change_requests)
+ vec![]
}
- fn remove(&mut self, k: &Self::K, _now: Time) -> Vec<ChangeRequest<'static, Self::K, Self::V>> {
- let mut inner = self.inner.lock();
-
- if let Some((consumption, _last_used)) = inner.last_used.remove(k) {
- // only lock pool after we are sure that there is anything to do prevent lock contention
- let mut pool = self.pool.inner.lock();
+ fn remove(&mut self, k: &Self::K, now: Time) -> Vec<ChangeRequest<'static, Self::K, Self::V>> {
+ trace!(?k, now = now.timestamp(), "LRU remove",);
+ let mut last_used = self.last_used.lock();
- pool.remove(consumption);
- inner.metric_count.dec(1);
- inner.metric_usage.dec(consumption.into());
+ if let Some((consumption, _last_used)) = last_used.remove(k) {
+ self.pool.remove(consumption);
+ self.metric_count.dec(1);
+ self.metric_usage.dec(consumption.into());
}
vec![]
}
}
-/// A member of a [`ResourcePool`]/[`ResourcePoolInner`].
-///
-/// Must be [locked](Self::lock) to gain access.
+/// A member of a [`ResourcePool`]/[`SharedState`].
///
/// The only implementation of this is [`PoolMemberImpl`]. This indirection is required to erase `K` and `V` from specific
/// backend so we can stick it into the generic pool.
@@ -705,8 +706,18 @@ trait PoolMember: Debug + Send + 'static {
/// Resource type.
type S;
- /// Lock pool member.
- fn lock(&self) -> Box<dyn PoolMemberGuard<S = Self::S> + '_>;
+ /// Check if this member has anything that could be removed.
+ ///
+ /// If so, return:
+ /// - "last used" timestamp
+ /// - resource consumption of that entry
+ /// - type-erased key
+ fn could_remove(&self) -> Option<(Time, Self::S, Box<dyn Any>)>;
+
+ /// Remove given set of keys.
+ ///
+ /// The keys MUST be a result of [`could_remove`](Self::could_remove), otherwise the downcasting may not work and panic.
+ fn remove_keys(&self, keys: Vec<Box<dyn Any>>);
}
/// The only implementation of [`PoolMember`].
@@ -719,8 +730,32 @@ where
V: Clone + Debug + Send + 'static,
S: Resource,
{
+ /// Pool member ID.
+ id: &'static str,
+
+ /// Count number of evicted items.
+ metric_evicted: U64Counter,
+
+ /// Tracks usage of the last used elements.
+ ///
+ /// See documentation of [`callback_handle`](Self::callback_handle) for a reasoning about locking.
+ last_used: Arc<Mutex<AddressableHeap<K, S, Time>>>,
+
+ /// Handle to call back into the [`PolicyBackend`] to evict data.
+ ///
+ /// # Locking
+ /// This MUST NOT share a lock with [`last_used`](Self::last_used) because otherwise we would deadlock during
+ /// eviction:
+ ///
+ /// 1. [`remove_keys`](PoolMember::remove_keys)
+ /// 2. lock both [`callback_handle`](Self::callback_handle) and [`last_used`](Self::last_used)
+ /// 3. [`CallbackHandle::execute_requests`]
+ /// 4. [`Subscriber::remove`]
+ /// 5. need to lock [`last_used`](Self::last_used) again
+ ///
+ ///
+ /// [`PolicyBackend`]: super::PolicyBackend
callback_handle: Mutex<CallbackHandle<K, V>>,
- inner: Arc<Mutex<LruPolicyInner<K, V, S>>>,
}
impl<K, V, S> PoolMember for PoolMemberImpl<K, V, S>
@@ -731,111 +766,119 @@ where
{
type S = S;
- fn lock(&self) -> Box<dyn PoolMemberGuard<S = Self::S> + '_> {
- Box::new(PoolMemberGuardImpl {
- callback_handle: self.callback_handle.lock(),
- inner: Some(self.inner.lock()),
- })
+ fn could_remove(&self) -> Option<(Time, Self::S, Box<dyn Any>)> {
+ let last_used = self.last_used.lock();
+ last_used
+ .peek()
+ .map(|(k, s, t)| (*t, *s, Box::new(k.clone()) as _))
}
-}
-/// Locked [`ResourcePool`]/[`ResourcePoolInner`] member.
-///
-/// The only implementation of this is [`PoolMemberGuardImpl`]. This indirection is required to erase `K` and `V` from
-/// specific backend so we can stick it into the generic pool.
-trait PoolMemberGuard: Debug {
- /// Resource type.
- type S;
+ fn remove_keys(&self, keys: Vec<Box<dyn Any>>) {
+ let keys = keys
+ .into_iter()
+ .map(|k| *k.downcast::<K>().expect("wrong type"))
+ .collect::<Vec<K>>();
- /// Check if this member has anything that could be removed. If so, return the "last used" timestamp of the oldest
- /// entry.
- fn could_remove(&self) -> Option<Time>;
+ trace!(
+ id = self.id,
+ ?keys,
+ "evicting cache entries due to LRU pressure",
+ );
+ self.metric_evicted.inc(keys.len() as u64);
- /// Remove oldest entry and return consumption of the removed entry and an opaque [`ChangeRequest`].
- ///
- /// This method is used for pool members that did NOT trigger the removal.
- ///
- /// # Panic
- /// This must only be used if [`could_remove`](Self::could_remove) was used to check if there is anything to check
- /// if there is an entry that could be removed. Panics if this is not the case.
- fn remove_oldest(&mut self) -> (Self::S, Box<dyn Any>);
+ let combined = ChangeRequest::from_fn(move |backend| {
+ for k in keys {
+ backend.remove(&k);
+ }
+ });
- /// Perform opaque [`ChangeRequest`]s on pool member.
- fn execute_requests(self: Box<Self>, requests: Vec<Box<dyn Any>>);
+ self.callback_handle.lock().execute_requests(vec![combined]);
+ }
}
-/// The only implementation of [`PoolMemberGuard`].
+/// Background worker that eventually cleans up data if the pool reaches capacity.
///
-/// In contrast to the trait, this still contains `K` and `V`.
-#[derive(Debug)]
-pub struct PoolMemberGuardImpl<'a, K, V, S>
-where
- K: Clone + Eq + Debug + Hash + Ord + Send + 'static,
- V: Clone + Debug + Send + 'static,
+/// This method NEVER returns.
+async fn clean_up_loop<S>(
+ shared: Arc<SharedState<S>>,
+ mut notify_idle_worker_side: tokio::sync::mpsc::UnboundedReceiver<
+ tokio::sync::oneshot::Sender<()>,
+ >,
+) where
S: Resource,
{
- callback_handle: MutexGuard<'a, CallbackHandle<K, V>>,
- inner: Option<MutexGuard<'a, LruPolicyInner<K, V, S>>>,
-}
-
-impl<'a, K, V, S> PoolMemberGuard for PoolMemberGuardImpl<'a, K, V, S>
-where
- K: Clone + Eq + Debug + Hash + Ord + Send + 'static,
- V: Clone + Debug + Send + 'static,
- S: Resource,
-{
- type S = S;
-
- fn could_remove(&self) -> Option<Time> {
- let inner = self.inner.as_ref().expect("not yet finalized");
- inner.last_used.peek().map(|(_k, _s, t)| *t)
- }
-
- fn remove_oldest(&mut self) -> (Self::S, Box<dyn Any>) {
- let inner = self.inner.as_mut().expect("not yet finalized");
-
- let (k, s, _t) = inner.last_used.pop().expect("nothing to remove");
- inner.metric_count.dec(1);
- inner.metric_usage.dec(s.into());
- inner.metric_evicted.inc(1);
- (s, Box::new(ChangeRequest::<'static, K, V>::remove(k)))
- }
+ 'outer: loop {
+ // yield to tokio so that the runtime has a chance to abort this function during shutdown
+ tokio::task::yield_now().await;
+
+ // get current value but drop the lock immediately
+ // In particular, we must NOT hold the lock when we later execute the change requests, otherwise there would be two
+ // lock directions:
+ // - someone adding a new resource: member -> pool
+ // - clean-up loop: pool -> member
+ let mut current = {
+ let guard = shared.current.lock();
+ guard.v
+ };
+
+ if current <= shared.limit.v {
+ // nothing to do, sleep and then continue w/ next round
+ loop {
+ tokio::select! {
+ // biased sleep so we can notify test hooks if we're idle
+ biased;
+
+ _ = shared.change_notify.notified() => {continue 'outer;},
+
+ idle_notify = notify_idle_worker_side.recv() => {
+ if let Some(n) = idle_notify {
+ n.send(()).ok();
+ }
+ },
+ }
+ }
+ }
- fn execute_requests(mut self: Box<Self>, requests: Vec<Box<dyn Any>>) {
- let requests = downcast_change_requests::<K, V>(requests);
- let inner = self.inner.take().expect("not yet finalized");
+ // hold member lock
+ // this is OK since this is only modified when new members are added. The members themselves do NOT interact with
+ // this value.
+ let members = shared.members.lock();
- let combined = ChangeRequest::from_fn(|backend| {
- drop(inner);
+ // select victims
+ let mut victims: BTreeMap<&'static str, Vec<Box<dyn Any>>> = Default::default();
+ while current > shared.limit.v {
+ let mut options: Vec<_> = members
+ .iter()
+ .filter_map(|(id, member)| member.could_remove().map(|(t, s, k)| (t, s, k, *id)))
+ .collect();
+ options.sort_by_key(|(t, _s, _k, _id)| *t);
- for request in requests {
- request.eval(backend);
+ match options.into_iter().next() {
+ Some((_t, s, k, id)) => {
+ current = current - s;
+ victims.entry(id).or_default().push(k);
+ }
+ None => {
+ // data was deleted in the meantime, stop looping
+ break;
+ }
}
- });
+ }
- self.callback_handle.execute_requests(vec![combined]);
+ for (id, keys) in victims {
+ let member = members
+ .get(id)
+ .expect("did not drop the lock in the meantime");
+ member.remove_keys(keys);
+ }
}
}
-fn downcast_change_requests<K, V>(requests: Vec<Box<dyn Any>>) -> Vec<ChangeRequest<'static, K, V>>
-where
- K: Clone + Eq + Hash + Ord + Debug + Send + 'static,
- V: Clone + Debug + Send + 'static,
-{
- requests
- .into_iter()
- .map(|cr| {
- *cr.downcast::<ChangeRequest<'static, K, V>>()
- .expect("Inner change request type")
- })
- .collect()
-}
-
#[cfg(test)]
mod tests {
use std::{collections::HashMap, time::Duration};
- use iox_time::MockProvider;
+ use iox_time::{MockProvider, SystemProvider};
use metric::{Observation, RawReporter};
use crate::{
@@ -845,14 +888,15 @@ mod tests {
use super::*;
- #[test]
+ #[tokio::test]
#[should_panic(expected = "inner backend is not empty")]
- fn test_panic_inner_not_empty() {
+ async fn test_panic_inner_not_empty() {
let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
let pool = Arc::new(ResourcePool::new(
"pool",
TestSize(10),
Arc::new(metric::Registry::new()),
+ &Handle::current(),
));
let resource_estimator = Arc::new(TestResourceEstimator {});
@@ -868,14 +912,15 @@ mod tests {
})
}
- #[test]
+ #[tokio::test]
#[should_panic(expected = "Member 'id' already registered")]
- fn test_panic_id_collision() {
+ async fn test_panic_id_collision() {
let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
let pool = Arc::new(ResourcePool::new(
"pool",
TestSize(10),
Arc::new(metric::Registry::new()),
+ &Handle::current(),
));
let resource_estimator = Arc::new(TestResourceEstimator {});
@@ -894,13 +939,14 @@ mod tests {
));
}
- #[test]
- fn test_reregister_member() {
+ #[tokio::test]
+ async fn test_reregister_member() {
let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
let pool = Arc::new(ResourcePool::new(
"pool",
TestSize(10),
Arc::new(metric::Registry::new()),
+ &Handle::current(),
));
let resource_estimator = Arc::new(TestResourceEstimator {});
@@ -921,13 +967,14 @@ mod tests {
));
}
- #[test]
- fn test_empty() {
+ #[tokio::test]
+ async fn test_empty() {
let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
let pool = Arc::new(ResourcePool::new(
"pool",
TestSize(10),
Arc::new(metric::Registry::new()),
+ &Handle::current(),
));
let resource_estimator = Arc::new(TestResourceEstimator {});
@@ -943,13 +990,14 @@ mod tests {
assert_eq!(pool.current().0, 0);
}
- #[test]
- fn test_double_set() {
+ #[tokio::test]
+ async fn test_double_set() {
let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
let pool = Arc::new(ResourcePool::new(
"pool",
TestSize(2),
Arc::new(metric::Registry::new()),
+ &Handle::current(),
));
let resource_estimator = Arc::new(TestResourceEstimator {});
@@ -971,17 +1019,19 @@ mod tests {
time_provider.inc(Duration::from_millis(1));
backend.set(String::from("c"), 1usize);
+ pool.wait_converged().await;
assert_eq!(backend.get(&String::from("a")), None);
}
- #[test]
- fn test_override() {
+ #[tokio::test]
+ async fn test_override() {
let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
let pool = Arc::new(ResourcePool::new(
"pool",
TestSize(10),
Arc::new(metric::Registry::new()),
+ &Handle::current(),
));
let resource_estimator = Arc::new(TestResourceEstimator {});
@@ -1002,13 +1052,14 @@ mod tests {
assert_eq!(pool.current().0, 7);
}
- #[test]
- fn test_remove() {
+ #[tokio::test]
+ async fn test_remove() {
let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
let pool = Arc::new(ResourcePool::new(
"pool",
TestSize(10),
Arc::new(metric::Registry::new()),
+ &Handle::current(),
));
let resource_estimator = Arc::new(TestResourceEstimator {});
@@ -1036,13 +1087,14 @@ mod tests {
assert_eq!(pool.current().0, 3);
}
- #[test]
- fn test_eviction_order() {
+ #[tokio::test]
+ async fn test_eviction_order() {
let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
let pool = Arc::new(ResourcePool::new(
"pool",
TestSize(21),
Arc::new(metric::Registry::new()),
+ &Handle::current(),
));
let resource_estimator = Arc::new(TestResourceEstimator {});
@@ -1078,6 +1130,7 @@ mod tests {
time_provider.inc(Duration::from_millis(1));
// now are exactly at capacity
+ pool.wait_converged().await;
assert_inner_backend(
&mut backend1,
[
@@ -1094,6 +1147,7 @@ mod tests {
// adding a single element will drop the smallest key from the first backend (by ID)
backend1.set(String::from("foo1"), 1usize);
+ pool.wait_converged().await;
assert_eq!(pool.current().0, 19);
assert_inner_backend(
&mut backend1,
@@ -1111,6 +1165,7 @@ mod tests {
// now we can fill up data up to the capacity again
backend1.set(String::from("foo2"), 2usize);
+ pool.wait_converged().await;
assert_eq!(pool.current().0, 21);
assert_inner_backend(
&mut backend1,
@@ -1129,6 +1184,7 @@ mod tests {
// can evict two keys at the same time
backend1.set(String::from("foo3"), 2usize);
+ pool.wait_converged().await;
assert_eq!(pool.current().0, 18);
assert_inner_backend(
&mut backend1,
@@ -1146,6 +1202,7 @@ mod tests {
// can evict from another backend
backend1.set(String::from("foo4"), 4usize);
+ pool.wait_converged().await;
assert_eq!(pool.current().0, 20);
assert_inner_backend(
&mut backend1,
@@ -1161,6 +1218,7 @@ mod tests {
// can evict multiple timestamps
backend1.set(String::from("foo5"), 7usize);
+ pool.wait_converged().await;
assert_eq!(pool.current().0, 16);
assert_inner_backend(
&mut backend1,
@@ -1175,13 +1233,14 @@ mod tests {
assert_inner_backend(&mut backend2, []);
}
- #[test]
- fn test_get_updates_last_used() {
+ #[tokio::test]
+ async fn test_get_updates_last_used() {
let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
let pool = Arc::new(ResourcePool::new(
"pool",
TestSize(6),
Arc::new(metric::Registry::new()),
+ &Handle::current(),
));
let resource_estimator = Arc::new(TestResourceEstimator {});
@@ -1198,6 +1257,7 @@ mod tests {
time_provider.inc(Duration::from_millis(1));
backend.set(String::from("c"), 3usize);
+ pool.wait_converged().await;
time_provider.inc(Duration::from_millis(1));
@@ -1214,6 +1274,7 @@ mod tests {
);
backend.set(String::from("foo"), 3usize);
+ pool.wait_converged().await;
assert_eq!(pool.current().0, 4);
assert_inner_backend(
&mut backend,
@@ -1221,13 +1282,14 @@ mod tests {
);
}
- #[test]
- fn test_oversized_entries() {
+ #[tokio::test]
+ async fn test_oversized_entries() {
let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
let pool = Arc::new(ResourcePool::new(
"pool",
TestSize(10),
Arc::new(metric::Registry::new()),
+ &Handle::current(),
));
let resource_estimator = Arc::new(TestResourceEstimator {});
@@ -1239,20 +1301,23 @@ mod tests {
));
backend.set(String::from("a"), 1usize);
+ pool.wait_converged().await;
backend.set(String::from("b"), 11usize);
+ pool.wait_converged().await;
// "a" did NOT get evicted. Instead we removed the oversized entry straight away.
assert_eq!(pool.current().0, 1);
assert_inner_backend(&mut backend, [(String::from("a"), 1)]);
}
- #[test]
- fn test_values_are_dropped() {
+ #[tokio::test]
+ async fn test_values_are_dropped() {
let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
let pool = Arc::new(ResourcePool::new(
"pool",
TestSize(3),
Arc::new(metric::Registry::new()),
+ &Handle::current(),
));
#[derive(Debug)]
@@ -1285,22 +1350,25 @@ mod tests {
let v1_weak = Arc::downgrade(&v1);
backend.set(k1, v1);
+ pool.wait_converged().await;
time_provider.inc(Duration::from_millis(1));
backend.set(k2, v2);
+ pool.wait_converged().await;
assert_eq!(k1_weak.strong_count(), 0);
assert_eq!(v1_weak.strong_count(), 0);
}
- #[test]
- fn test_backends_are_dropped() {
+ #[tokio::test]
+ async fn test_backends_are_dropped() {
let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
let pool = Arc::new(ResourcePool::new(
"pool",
TestSize(3),
Arc::new(metric::Registry::new()),
+ &Handle::current(),
));
let resource_estimator = Arc::new(TestResourceEstimator {});
@@ -1358,14 +1426,15 @@ mod tests {
assert_eq!(marker_weak.strong_count(), 0);
}
- #[test]
- fn test_metrics() {
+ #[tokio::test]
+ async fn test_metrics() {
let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
let metric_registry = Arc::new(metric::Registry::new());
let pool = Arc::new(ResourcePool::new(
"pool",
TestSize(10),
Arc::clone(&metric_registry),
+ &Handle::current(),
));
let resource_estimator = Arc::new(TestResourceEstimator {});
@@ -1439,11 +1508,17 @@ mod tests {
);
backend.set(String::from("a"), 1usize); // usage = 1
+ pool.wait_converged().await;
backend.set(String::from("b"), 2usize); // usage = 3
+ pool.wait_converged().await;
backend.set(String::from("b"), 3usize); // usage = 4
+ pool.wait_converged().await;
backend.set(String::from("c"), 4usize); // usage = 8
+ pool.wait_converged().await;
backend.set(String::from("d"), 3usize); // usage = 10 (evicted "a")
+ pool.wait_converged().await;
backend.remove(&String::from("c")); // usage = 6
+ pool.wait_converged().await;
let mut reporter = RawReporter::default();
metric_registry.report(&mut reporter);
@@ -1489,8 +1564,13 @@ mod tests {
);
}
- #[test]
- fn test_generic_backend() {
+ /// A note regarding the test flavor:
+ ///
+ /// The main generic test function is not async, so the background clean-up would never fire because we don't
+ /// yield to tokio. The test will pass in both cases (w/ a single worker and w/ multiple), however if the
+ /// background worker is actually doing anything it might be a more realistic test case.
+ #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+ async fn test_generic_backend() {
use crate::backend::test_util::test_generic;
#[derive(Debug)]
@@ -1512,6 +1592,7 @@ mod tests {
"pool",
TestSize(10),
Arc::new(metric::Registry::new()),
+ &Handle::current(),
));
let resource_estimator = Arc::new(ZeroSizeProvider {});
@@ -1525,6 +1606,68 @@ mod tests {
});
}
+ /// Regression test for <https://github.com/influxdata/influxdb_iox/issues/8334>.
+ #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+ async fn test_deadlock() {
+ #[derive(Debug)]
+ struct OneSizeProvider {}
+
+ impl ResourceEstimator for OneSizeProvider {
+ type K = u128;
+ type V = ();
+ type S = TestSize;
+
+ fn consumption(&self, _k: &Self::K, _v: &Self::V) -> Self::S {
+ TestSize(1)
+ }
+ }
+
+ let time_provider = Arc::new(SystemProvider::new()) as _;
+ let pool = Arc::new(ResourcePool::new(
+ "pool",
+ TestSize(100),
+ Arc::new(metric::Registry::new()),
+ &Handle::current(),
+ ));
+ let resource_estimator = Arc::new(OneSizeProvider {});
+
+ let mut backend1 = PolicyBackend::hashmap_backed(Arc::clone(&time_provider));
+ backend1.add_policy(LruPolicy::new(
+ Arc::clone(&pool),
+ "id1",
+ Arc::clone(&resource_estimator) as _,
+ ));
+
+ let mut backend2 = PolicyBackend::hashmap_backed(Arc::clone(&time_provider));
+ backend2.add_policy(LruPolicy::new(
+ Arc::clone(&pool),
+ "id2",
+ Arc::clone(&resource_estimator) as _,
+ ));
+
+ let worker1 = tokio::spawn(async move {
+ let mut counter = 0u128;
+ loop {
+ backend1.set(counter, ());
+ counter += 2;
+ tokio::task::yield_now().await;
+ }
+ });
+ let worker2 = tokio::spawn(async move {
+ let mut counter = 1u128;
+ loop {
+ backend2.set(counter, ());
+ counter += 2;
+ tokio::task::yield_now().await;
+ }
+ });
+
+ tokio::time::sleep(Duration::from_secs(1)).await;
+
+ worker1.abort();
+ worker2.abort();
+ }
+
#[derive(Debug)]
struct TestResourceEstimator {}
diff --git a/cache_system/src/backend/policy/mod.rs b/cache_system/src/backend/policy/mod.rs
index 4cb794c76b..d525d28439 100644
--- a/cache_system/src/backend/policy/mod.rs
+++ b/cache_system/src/backend/policy/mod.rs
@@ -393,7 +393,11 @@ where
/// structures while calling this function if you plan to also [subscribe](Subscriber) to
/// changes because this would easily lead to deadlocks.
pub fn execute_requests(&mut self, change_requests: Vec<ChangeRequest<'_, K, V>>) {
- let inner = self.inner.upgrade().expect("backend gone");
+ let Some(inner) = self.inner.upgrade() else {
+ // backend gone, can happen during shutdowns, try not to panic
+ return;
+ };
+
lock_inner!(mut guard = inner);
perform_changes(&mut guard, change_requests);
}
diff --git a/cache_system/src/resource_consumption.rs b/cache_system/src/resource_consumption.rs
index 23bcd4ed97..62609e7a60 100644
--- a/cache_system/src/resource_consumption.rs
+++ b/cache_system/src/resource_consumption.rs
@@ -9,7 +9,15 @@ use std::{
///
/// Can be used to represent in-RAM memory as well as on-disc memory.
pub trait Resource:
- Add<Output = Self> + Copy + Debug + Into<u64> + PartialOrd + Send + Sub<Output = Self> + 'static
+ Add<Output = Self>
+ + Copy
+ + Debug
+ + Into<u64>
+ + PartialOrd
+ + Send
+ + Sync
+ + Sub<Output = Self>
+ + 'static
{
/// Create resource consumption of zero.
fn zero() -> Self;
diff --git a/querier/src/cache/mod.rs b/querier/src/cache/mod.rs
index 0cfd0615af..3b48c16436 100644
--- a/querier/src/cache/mod.rs
+++ b/querier/src/cache/mod.rs
@@ -113,11 +113,13 @@ impl CatalogCache {
"ram_metadata",
RamSize(ram_pool_metadata_bytes),
Arc::clone(&metric_registry),
+ &Handle::current(),
));
let ram_pool_data = Arc::new(ResourcePool::new(
"ram_data",
RamSize(ram_pool_data_bytes),
Arc::clone(&metric_registry),
+ &Handle::current(),
));
let partition_cache = PartitionCache::new(
diff --git a/querier/src/cache/ram.rs b/querier/src/cache/ram.rs
index 4f7c96bc59..861a249a66 100644
--- a/querier/src/cache/ram.rs
+++ b/querier/src/cache/ram.rs
@@ -43,12 +43,14 @@ pub mod test_util {
use std::sync::Arc;
use cache_system::backend::policy::lru::ResourcePool;
+ use tokio::runtime::Handle;
pub fn test_ram_pool() -> Arc<ResourcePool<RamSize>> {
Arc::new(ResourcePool::new(
"pool",
RamSize(usize::MAX),
Arc::new(metric::Registry::new()),
+ &Handle::current(),
))
}
}
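
The eviction strategy above is easier to see in isolation: `clean_up_loop` peeks the oldest entry of every member, selects the globally oldest until projected usage drops back under the pool limit, and only then calls back into each member to remove the chosen keys. The following is a minimal, self-contained sketch of that select-then-remove idea; the `Member` alias, the plain `u64` sizes and timestamps, and the `select_victims` name are illustrative stand-ins, not the real `cache_system` types.

```rust
use std::collections::BTreeMap;

/// Hypothetical stand-in for one pool member: entries keyed by (last_used, key),
/// so iteration order is oldest-first.
type Member = BTreeMap<(u64, String), u64>;

/// Evict the globally oldest entries across all members until usage drops to
/// `limit`, returning the evicted keys per member.
fn select_victims(
    members: &mut BTreeMap<&'static str, Member>,
    mut current: u64,
    limit: u64,
) -> BTreeMap<&'static str, Vec<String>> {
    let mut victims: BTreeMap<&'static str, Vec<String>> = BTreeMap::new();
    while current > limit {
        // Peek the oldest entry of every member and pick the globally oldest.
        let oldest = members
            .iter()
            .filter_map(|(id, m)| m.keys().next().cloned().map(|k| (k, *id)))
            .min_by_key(|((t, _), _)| *t);
        match oldest {
            Some(((t, key), id)) => {
                let size = members
                    .get_mut(id)
                    .unwrap()
                    .remove(&(t, key.clone()))
                    .expect("peeked entry still present");
                current -= size;
                victims.entry(id).or_default().push(key);
            }
            // Nothing left to evict (in the real code, data may have been removed concurrently).
            None => break,
        }
    }
    victims
}

fn main() {
    let mut members = BTreeMap::new();
    members.insert(
        "id1",
        Member::from([((1, "a".to_string()), 4), ((3, "b".to_string()), 2)]),
    );
    members.insert("id2", Member::from([((2, "c".to_string()), 5)]));

    // Total usage is 11; evict oldest-first until it is <= 4: "a" (t=1), then "c" (t=2).
    let victims = select_victims(&mut members, 11, 4);
    assert_eq!(victims["id1"], vec!["a".to_string()]);
    assert_eq!(victims["id2"], vec!["c".to_string()]);
    println!("{victims:?}");
}
```

Splitting selection from removal mirrors the locking discipline documented in the diff: the per-member `last_used` lock is only taken briefly to peek or remove, and is never held while the policy backend callback executes the change requests.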
|
c39ae97dd4721e0485f95f3830b33147906885de
|
Luke Bond
|
2022-10-25 16:40:13
|
temporarily disable circle filter to build & push PRs (#5973)
|
* chore: temporarily disable circle filter to build & push PRs
* chore: allow build & push of container image for branches using param
* chore: indentation fix in circle config
* chore: rename build_perf to release_branch
| null |
chore: temporarily disable circle filter to build & push PRs (#5973)
* chore: temporarily disable circle filter to build & push PRs
* chore: allow build & push of container image for branches using param
* chore: indentation fix in circle config
* chore: rename build_perf to release_branch
|
diff --git a/.circleci/config.yml b/.circleci/config.yml
index b9391ee879..332cafc743 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -15,9 +15,33 @@
# On main if all these checks pass it will then additionally compile in "release" mode and
# publish a docker image to quay.io/influxdb/iox:$COMMIT_SHA
#
-# Manual CI Image:
+# Manually trigger build and push of container image for a branch:
#
-# It is possible to manually trigger a rebuild of the image used in CI. To do this, navigate to
+# Navigate to https://app.circleci.com/pipelines/github/influxdata/influxdb_iox?branch=<branch-name> (<- change this!)
+# Then:
+#
+# - Click "Run Pipeline" in the top-right
+# - Expand "Add Parameters"
+# - Add a "boolean" parameter called "release_branch" with the value true
+# - Click "Run Pipeline"
+#
+# You can also do this using the CircleCI API:
+#
+# Using `xh`:
+#
+# # e.g. using 'xh' (https://github.com/ducaale/xh)
+# $ xh -a '<your personal circleCI token>:' POST \
+# https://circleci.com/api/v2/project/github/influxdata/influxdb_iox/pipeline \
+# parameters:='{"release_branch": true}' branch=chore/ci-tidy-up
+#
+# ...or equivalent with `curl`:
+# $ curl -XPOST -H "Content-Type: application/json" -H "Circle-Token: <your personal circleCI token>" \
+# -d '{"parameters": {"release_branch": true}, "branch": "chore/ci-tidy-up"}' \
+# https://circleci.com/api/v2/project/github/influxdata/influxdb_iox/pipeline
+#
+# Manual CI Base Image:
+#
+# It is possible to manually trigger a rebuild of the base image used in CI. To do this, navigate to
# https://app.circleci.com/pipelines/github/influxdata/influxdb_iox?branch=main (overriding the
# branch name if desired). Then:
# - Click "Run Pipeline" in the top-right
@@ -457,9 +481,8 @@ parameters:
description: "Trigger build of CI image"
type: boolean
default: false
- build_perf:
- # see comments below in build_perf job for usage
- description: "Trigger build of perf image"
+ release_branch:
+ description: "Build and push container image for non-main branch"
type: boolean
default: false
@@ -471,7 +494,7 @@ workflows:
when:
and:
- not: << pipeline.parameters.ci_image >>
- - not: << pipeline.parameters.build_perf >>
+ - not: << pipeline.parameters.release_branch >>
jobs:
- fmt
- lint
@@ -509,20 +532,10 @@ workflows:
jobs:
- ci_image
- # Manual build of release image for a branch.
- # Trigger using the CircleCI API, like so:
- #
- # # e.g. using 'xh' (https://github.com/ducaale/xh)
- # $ xh -a '<your personal circleCI token>:' POST \
- # https://circleci.com/api/v2/project/github/influxdata/influxdb_iox/pipeline \
- # parameters:='{"build_perf": true}' branch=chore/ci-tidy-up
- #
- # ...or equivalent with `curl`:
- # $ curl -XPOST -H "Content-Type: application/json" -H "Circle-Token: <your personal circleCI token>" \
- # -d '{"parameters": {"build_perf": true}, "branch": "chore/ci-tidy-up"}' \
- # https://circleci.com/api/v2/project/github/influxdata/influxdb_iox/pipeline
- build_perf:
- when: << pipeline.parameters.build_perf >>
+ # Force build and push of container image for non-main branch.
+ # See instructions at the top of this file
+ release_branch:
+ when: << pipeline.parameters.release_branch >>
jobs:
- build_release
- deploy_release:
|
678fb81892cdb2042e3ab562b051178476e8972d
|
Dom Dwyer
|
2022-10-21 14:25:43
|
use partition buffer FSM
|
This commit makes use of the partition buffer state machine introduced
in https://github.com/influxdata/influxdb_iox/pull/5943.
This commit significantly changes the buffering, and querying, of data
from a partition, swapping out the existing "DataBuffer" for the new
state machine implementation (itself simplified due to temporary lack of
incremental snapshot generation, see #5944).
This commit simplifies the query path, removing multiple types that
wrapped one-another to pass around various state necessary to perform a
query, with various query functions needing different types or
combinations of types. The query path now operates using a single type
(named "QueryAdaptor") that provides a queryable interface over the set
of RecordBatch returned from a partition.
There is significantly increased testing of the PartitionData itself,
covering data in various states and the ordering of returned RecordBatch
(to ensure correct materialisation of updates). There are also
invariants upheld by the type system / compiler to minimise the
complexities of working with empty batches & states, and many asserts
that ensure (mostly existing!) invariants are upheld.
| null |
refactor(ingester): use partition buffer FSM
This commit makes use of the partition buffer state machine introduced
in https://github.com/influxdata/influxdb_iox/pull/5943.
This commit significantly changes the buffering, and querying, of data
from a partition, swapping out the existing "DataBuffer" for the new
state machine implementation (itself simplified due to temporary lack of
incremental snapshot generation, see #5944).
This commit simplifies the query path, removing multiple types that
wrapped one-another to pass around various state necessary to perform a
query, with various query functions needing different types or
combinations of types. The query path now operates using a single type
(named "QueryAdaptor") that provides a queryable interface over the set
of RecordBatch returned from a partition.
There is significantly increased testing of the PartitionData itself,
covering data in various states and the ordering of returned RecordBatch
(to ensure correct materialisation of updates). There are also
invariants upheld by the type system / compiler to minimise the
complexities of working with empty batches & states, and many asserts
that ensure (mostly existing!) invariants are upheld.
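
The commit message above refers to `QueryAdaptor` as the single type the query path now flows through. A rough, hypothetical sketch of that shape follows; the `QueryAdaptorSketch` name, the plain `i64` partition id, the direct use of the `arrow` crate, and the first-batch `schema()` shortcut are all simplifications for illustration (the real type lives in the ingester's `query_adaptor` module and merges the schemas of all batches, as the `Schemas compatible` panic test in the diff below exercises).

```rust
use std::sync::Arc;

use arrow::array::{ArrayRef, Int64Array};
use arrow::datatypes::{DataType, Field, Schema, SchemaRef};
use arrow::record_batch::RecordBatch;

/// Illustrative stand-in for the ingester's `QueryAdaptor`.
#[derive(Debug)]
struct QueryAdaptorSketch {
    table_name: Arc<str>,
    partition_id: i64,
    data: Vec<Arc<RecordBatch>>,
}

impl QueryAdaptorSketch {
    fn new(table_name: Arc<str>, partition_id: i64, data: Vec<Arc<RecordBatch>>) -> Self {
        // The real adaptor is never constructed over an empty set of batches.
        assert!(!data.is_empty());
        Self {
            table_name,
            partition_id,
            data,
        }
    }

    /// The batches backing this partition's data, in application order.
    fn record_batches(&self) -> &[Arc<RecordBatch>] {
        &self.data
    }

    /// Schema of the data (simplified: the real adaptor merges per-batch schemas).
    fn schema(&self) -> SchemaRef {
        self.data[0].schema()
    }
}

fn main() {
    // One single-column batch, standing in for buffered partition data.
    let schema = Arc::new(Schema::new(vec![Field::new("bar", DataType::Int64, false)]));
    let col: ArrayRef = Arc::new(Int64Array::from(vec![2]));
    let batch = RecordBatch::try_new(Arc::clone(&schema), vec![col]).unwrap();

    let adaptor = QueryAdaptorSketch::new("test_table".into(), 1, vec![Arc::new(batch)]);
    assert_eq!(adaptor.record_batches().len(), 1);
    assert_eq!(adaptor.schema().fields().len(), 1);
    println!(
        "{}: {} rows buffered for partition {}",
        adaptor.table_name,
        adaptor.record_batches()[0].num_rows(),
        adaptor.partition_id
    );
}
```

Collapsing the earlier wrapper types into one such adaptor is what lets the compaction tests in the diff construct their input with a single `QueryAdaptor::new(table_name, partition_id, batches)` call.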
|
diff --git a/Cargo.lock b/Cargo.lock
index 6285ded57d..4238ca6ff7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2279,7 +2279,6 @@ dependencies = [
"assert_matches",
"async-trait",
"backoff",
- "bitflags",
"bytes",
"chrono",
"data_types",
@@ -2293,6 +2292,7 @@ dependencies = [
"iox_catalog",
"iox_query",
"iox_time",
+ "lazy_static",
"metric",
"mutable_batch",
"mutable_batch_lp",
diff --git a/ingester/Cargo.toml b/ingester/Cargo.toml
index 857900e923..910493463c 100644
--- a/ingester/Cargo.toml
+++ b/ingester/Cargo.toml
@@ -47,11 +47,11 @@ write_summary = { path = "../write_summary" }
tokio-util = { version = "0.7.4" }
trace = { path = "../trace" }
rand = "0.8.5"
+once_cell = "1"
[dev-dependencies]
assert_matches = "1.5.0"
-bitflags = {version = "1.3.2"}
-once_cell = "1"
+lazy_static = "1.4.0"
paste = "1.0.9"
test_helpers = { path = "../test_helpers", features = ["future_timeout"] }
tokio-stream = {version = "0.1.11", default_features = false }
diff --git a/ingester/src/compact.rs b/ingester/src/compact.rs
index ba08c693ee..7c57dd7510 100644
--- a/ingester/src/compact.rs
+++ b/ingester/src/compact.rs
@@ -11,7 +11,7 @@ use iox_query::{
use schema::sort::{adjust_sort_key_columns, compute_sort_key, SortKey};
use snafu::{ResultExt, Snafu};
-use crate::{data::partition::PersistingBatch, query::QueryableBatch};
+use crate::query_adaptor::QueryAdaptor;
#[derive(Debug, Snafu)]
#[allow(missing_copy_implementations, missing_docs)]
@@ -85,14 +85,14 @@ impl std::fmt::Debug for CompactedStream {
}
}
-/// Compact a given persisting batch into a [`CompactedStream`] or
-/// `None` if there is no data to compact.
+/// Compact a given batch into a [`CompactedStream`] or `None` if there is no
+/// data to compact, returning an updated sort key, if any.
pub(crate) async fn compact_persisting_batch(
executor: &Executor,
sort_key: Option<SortKey>,
- batch: Arc<PersistingBatch>,
+ batch: QueryAdaptor,
) -> Result<CompactedStream> {
- assert!(!batch.data.data.is_empty());
+ assert!(!batch.record_batches().is_empty());
// Get sort key from the catalog or compute it from
// cardinality.
@@ -104,12 +104,12 @@ pub(crate) async fn compact_persisting_batch(
//
// If there are any new columns, add them to the end of the sort key in the catalog and
// return that to be updated in the catalog.
- adjust_sort_key_columns(&sk, &batch.data.schema().primary_key())
+ adjust_sort_key_columns(&sk, &batch.schema().primary_key())
}
None => {
let sort_key = compute_sort_key(
- batch.data.schema().as_ref(),
- batch.data.data.iter().map(|sb| sb.data.as_ref()),
+ batch.schema().as_ref(),
+ batch.record_batches().iter().map(|sb| sb.as_ref()),
);
// Use the sort key computed from the cardinality as the sort key for this parquet
// file's metadata, also return the sort key to be stored in the catalog
@@ -118,7 +118,7 @@ pub(crate) async fn compact_persisting_batch(
};
// Compact
- let stream = compact(executor, Arc::clone(&batch.data), data_sort_key.clone()).await?;
+ let stream = compact(executor, Arc::new(batch), data_sort_key.clone()).await?;
Ok(CompactedStream {
stream,
@@ -127,10 +127,10 @@ pub(crate) async fn compact_persisting_batch(
})
}
-/// Compact a given Queryable Batch
+/// Compact a given batch without updating the sort key.
pub(crate) async fn compact(
executor: &Executor,
- data: Arc<QueryableBatch>,
+ data: Arc<QueryAdaptor>,
sort_key: SortKey,
) -> Result<SendableRecordBatchStream> {
// Build logical plan for compaction
@@ -157,9 +157,9 @@ pub(crate) async fn compact(
#[cfg(test)]
mod tests {
use arrow_util::assert_batches_eq;
+ use data_types::PartitionId;
use mutable_batch_lp::lines_to_batches;
use schema::selection::Selection;
- use uuid::Uuid;
use super::*;
use crate::test_util::{
@@ -169,14 +169,14 @@ mod tests {
create_batches_with_influxtype_same_columns_different_type,
create_one_record_batch_with_influxtype_duplicates,
create_one_record_batch_with_influxtype_no_duplicates,
- create_one_row_record_batch_with_influxtype, make_persisting_batch, make_queryable_batch,
+ create_one_row_record_batch_with_influxtype,
};
// this test was added to guard against https://github.com/influxdata/influxdb_iox/issues/3782
// where if sending in a single row it would compact into an output of two batches, one of
// which was empty, which would cause this to panic.
#[tokio::test]
- async fn test_compact_persisting_batch_on_one_record_batch_with_one_row() {
+ async fn test_compact_batch_on_one_record_batch_with_one_row() {
// create input data
let batch = lines_to_batches("cpu bar=2 20", 0)
.unwrap()
@@ -184,26 +184,15 @@ mod tests {
.unwrap()
.to_arrow(Selection::All)
.unwrap();
- let batches = vec![Arc::new(batch)];
- // build persisting batch from the input batches
- let uuid = Uuid::new_v4();
- let table_name = "test_table";
- let shard_id = 1;
- let seq_num_start: i64 = 1;
- let table_id = 1;
- let partition_id = 1;
- let persisting_batch = make_persisting_batch(
- shard_id,
- seq_num_start,
- table_id,
- table_name,
- partition_id,
- uuid,
- batches,
+
+ let batch = QueryAdaptor::new(
+ "test_table".into(),
+ PartitionId::new(1),
+ vec![Arc::new(batch)],
);
// verify PK
- let schema = persisting_batch.data.schema();
+ let schema = batch.schema();
let pk = schema.primary_key();
let expected_pk = vec!["time"];
assert_eq!(expected_pk, pk);
@@ -211,7 +200,7 @@ mod tests {
// compact
let exc = Executor::new(1);
let CompactedStream { stream, .. } =
- compact_persisting_batch(&exc, Some(SortKey::empty()), persisting_batch)
+ compact_persisting_batch(&exc, Some(SortKey::empty()), batch)
.await
.unwrap();
@@ -232,29 +221,16 @@ mod tests {
}
#[tokio::test]
- async fn test_compact_persisting_batch_on_one_record_batch_no_dupilcates() {
+ async fn test_compact_batch_on_one_record_batch_no_dupilcates() {
// create input data
- let batches = create_one_record_batch_with_influxtype_no_duplicates().await;
-
- // build persisting batch from the input batches
- let uuid = Uuid::new_v4();
- let table_name = "test_table";
- let shard_id = 1;
- let seq_num_start: i64 = 1;
- let table_id = 1;
- let partition_id = 1;
- let persisting_batch = make_persisting_batch(
- shard_id,
- seq_num_start,
- table_id,
- table_name,
- partition_id,
- uuid,
- batches,
+ let batch = QueryAdaptor::new(
+ "test_table".into(),
+ PartitionId::new(1),
+ create_one_record_batch_with_influxtype_no_duplicates().await,
);
// verify PK
- let schema = persisting_batch.data.schema();
+ let schema = batch.schema();
let pk = schema.primary_key();
let expected_pk = vec!["tag1", "time"];
assert_eq!(expected_pk, pk);
@@ -265,7 +241,7 @@ mod tests {
stream,
data_sort_key,
catalog_sort_key_update,
- } = compact_persisting_batch(&exc, Some(SortKey::empty()), persisting_batch)
+ } = compact_persisting_batch(&exc, Some(SortKey::empty()), batch)
.await
.unwrap();
@@ -295,29 +271,16 @@ mod tests {
}
#[tokio::test]
- async fn test_compact_persisting_batch_no_sort_key() {
+ async fn test_compact_batch_no_sort_key() {
// create input data
- let batches = create_batches_with_influxtype_different_cardinality().await;
-
- // build persisting batch from the input batches
- let uuid = Uuid::new_v4();
- let table_name = "test_table";
- let shard_id = 1;
- let seq_num_start: i64 = 1;
- let table_id = 1;
- let partition_id = 1;
- let persisting_batch = make_persisting_batch(
- shard_id,
- seq_num_start,
- table_id,
- table_name,
- partition_id,
- uuid,
- batches,
+ let batch = QueryAdaptor::new(
+ "test_table".into(),
+ PartitionId::new(1),
+ create_batches_with_influxtype_different_cardinality().await,
);
// verify PK
- let schema = persisting_batch.data.schema();
+ let schema = batch.schema();
let pk = schema.primary_key();
let expected_pk = vec!["tag1", "tag3", "time"];
assert_eq!(expected_pk, pk);
@@ -329,7 +292,7 @@ mod tests {
stream,
data_sort_key,
catalog_sort_key_update,
- } = compact_persisting_batch(&exc, Some(SortKey::empty()), persisting_batch)
+ } = compact_persisting_batch(&exc, Some(SortKey::empty()), batch)
.await
.unwrap();
@@ -363,29 +326,16 @@ mod tests {
}
#[tokio::test]
- async fn test_compact_persisting_batch_with_specified_sort_key() {
+ async fn test_compact_batch_with_specified_sort_key() {
// create input data
- let batches = create_batches_with_influxtype_different_cardinality().await;
-
- // build persisting batch from the input batches
- let uuid = Uuid::new_v4();
- let table_name = "test_table";
- let shard_id = 1;
- let seq_num_start: i64 = 1;
- let table_id = 1;
- let partition_id = 1;
- let persisting_batch = make_persisting_batch(
- shard_id,
- seq_num_start,
- table_id,
- table_name,
- partition_id,
- uuid,
- batches,
+ let batch = QueryAdaptor::new(
+ "test_table".into(),
+ PartitionId::new(1),
+ create_batches_with_influxtype_different_cardinality().await,
);
// verify PK
- let schema = persisting_batch.data.schema();
+ let schema = batch.schema();
let pk = schema.primary_key();
let expected_pk = vec!["tag1", "tag3", "time"];
assert_eq!(expected_pk, pk);
@@ -401,7 +351,7 @@ mod tests {
} = compact_persisting_batch(
&exc,
Some(SortKey::from_columns(["tag3", "tag1", "time"])),
- persisting_batch,
+ batch,
)
.await
.unwrap();
@@ -435,29 +385,16 @@ mod tests {
}
#[tokio::test]
- async fn test_compact_persisting_batch_new_column_for_sort_key() {
+ async fn test_compact_batch_new_column_for_sort_key() {
// create input data
- let batches = create_batches_with_influxtype_different_cardinality().await;
-
- // build persisting batch from the input batches
- let uuid = Uuid::new_v4();
- let table_name = "test_table";
- let shard_id = 1;
- let seq_num_start: i64 = 1;
- let table_id = 1;
- let partition_id = 1;
- let persisting_batch = make_persisting_batch(
- shard_id,
- seq_num_start,
- table_id,
- table_name,
- partition_id,
- uuid,
- batches,
+ let batch = QueryAdaptor::new(
+ "test_table".into(),
+ PartitionId::new(1),
+ create_batches_with_influxtype_different_cardinality().await,
);
// verify PK
- let schema = persisting_batch.data.schema();
+ let schema = batch.schema();
let pk = schema.primary_key();
let expected_pk = vec!["tag1", "tag3", "time"];
assert_eq!(expected_pk, pk);
@@ -471,13 +408,9 @@ mod tests {
stream,
data_sort_key,
catalog_sort_key_update,
- } = compact_persisting_batch(
- &exc,
- Some(SortKey::from_columns(["tag3", "time"])),
- persisting_batch,
- )
- .await
- .unwrap();
+ } = compact_persisting_batch(&exc, Some(SortKey::from_columns(["tag3", "time"])), batch)
+ .await
+ .unwrap();
let output_batches = datafusion::physical_plan::common::collect(stream)
.await
@@ -511,29 +444,16 @@ mod tests {
}
#[tokio::test]
- async fn test_compact_persisting_batch_missing_column_for_sort_key() {
+ async fn test_compact_batch_missing_column_for_sort_key() {
// create input data
- let batches = create_batches_with_influxtype_different_cardinality().await;
-
- // build persisting batch from the input batches
- let uuid = Uuid::new_v4();
- let table_name = "test_table";
- let shard_id = 1;
- let seq_num_start: i64 = 1;
- let table_id = 1;
- let partition_id = 1;
- let persisting_batch = make_persisting_batch(
- shard_id,
- seq_num_start,
- table_id,
- table_name,
- partition_id,
- uuid,
- batches,
+ let batch = QueryAdaptor::new(
+ "test_table".into(),
+ PartitionId::new(1),
+ create_batches_with_influxtype_different_cardinality().await,
);
// verify PK
- let schema = persisting_batch.data.schema();
+ let schema = batch.schema();
let pk = schema.primary_key();
let expected_pk = vec!["tag1", "tag3", "time"];
assert_eq!(expected_pk, pk);
@@ -550,7 +470,7 @@ mod tests {
} = compact_persisting_batch(
&exc,
Some(SortKey::from_columns(["tag3", "tag1", "tag4", "time"])),
- persisting_batch,
+ batch,
)
.await
.unwrap();
@@ -588,26 +508,25 @@ mod tests {
test_helpers::maybe_start_logging();
// create input data
- let batches = create_one_row_record_batch_with_influxtype().await;
-
- // build queryable batch from the input batches
- let compact_batch = make_queryable_batch("test_table", 0, 1, batches);
+ let batch = QueryAdaptor::new(
+ "test_table".into(),
+ PartitionId::new(1),
+ create_one_row_record_batch_with_influxtype().await,
+ );
// verify PK
- let schema = compact_batch.schema();
+ let schema = batch.schema();
let pk = schema.primary_key();
let expected_pk = vec!["tag1", "time"];
assert_eq!(expected_pk, pk);
- let sort_key = compute_sort_key(
- &schema,
- compact_batch.data.iter().map(|sb| sb.data.as_ref()),
- );
+ let sort_key =
+ compute_sort_key(&schema, batch.record_batches().iter().map(|rb| rb.as_ref()));
assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"]));
// compact
let exc = Executor::new(1);
- let stream = compact(&exc, compact_batch, sort_key).await.unwrap();
+ let stream = compact(&exc, Arc::new(batch), sort_key).await.unwrap();
let output_batches = datafusion::physical_plan::common::collect(stream)
.await
.unwrap();
@@ -629,26 +548,25 @@ mod tests {
#[tokio::test]
async fn test_compact_one_batch_with_duplicates() {
// create input data
- let batches = create_one_record_batch_with_influxtype_duplicates().await;
-
- // build queryable batch from the input batches
- let compact_batch = make_queryable_batch("test_table", 0, 1, batches);
+ let batch = QueryAdaptor::new(
+ "test_table".into(),
+ PartitionId::new(1),
+ create_one_record_batch_with_influxtype_duplicates().await,
+ );
// verify PK
- let schema = compact_batch.schema();
+ let schema = batch.schema();
let pk = schema.primary_key();
let expected_pk = vec!["tag1", "time"];
assert_eq!(expected_pk, pk);
- let sort_key = compute_sort_key(
- &schema,
- compact_batch.data.iter().map(|sb| sb.data.as_ref()),
- );
+ let sort_key =
+ compute_sort_key(&schema, batch.record_batches().iter().map(|rb| rb.as_ref()));
assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"]));
// compact
let exc = Executor::new(1);
- let stream = compact(&exc, compact_batch, sort_key).await.unwrap();
+ let stream = compact(&exc, Arc::new(batch), sort_key).await.unwrap();
let output_batches = datafusion::physical_plan::common::collect(stream)
.await
.unwrap();
@@ -678,26 +596,25 @@ mod tests {
#[tokio::test]
async fn test_compact_many_batches_same_columns_with_duplicates() {
// create many-batches input data
- let batches = create_batches_with_influxtype().await;
-
- // build queryable batch from the input batches
- let compact_batch = make_queryable_batch("test_table", 0, 1, batches);
+ let batch = QueryAdaptor::new(
+ "test_table".into(),
+ PartitionId::new(1),
+ create_batches_with_influxtype().await,
+ );
// verify PK
- let schema = compact_batch.schema();
+ let schema = batch.schema();
let pk = schema.primary_key();
let expected_pk = vec!["tag1", "time"];
assert_eq!(expected_pk, pk);
- let sort_key = compute_sort_key(
- &schema,
- compact_batch.data.iter().map(|sb| sb.data.as_ref()),
- );
+ let sort_key =
+ compute_sort_key(&schema, batch.record_batches().iter().map(|rb| rb.as_ref()));
assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"]));
// compact
let exc = Executor::new(1);
- let stream = compact(&exc, compact_batch, sort_key).await.unwrap();
+ let stream = compact(&exc, Arc::new(batch), sort_key).await.unwrap();
let output_batches = datafusion::physical_plan::common::collect(stream)
.await
.unwrap();
@@ -724,26 +641,25 @@ mod tests {
#[tokio::test]
async fn test_compact_many_batches_different_columns_with_duplicates() {
// create many-batches input data
- let batches = create_batches_with_influxtype_different_columns().await;
-
- // build queryable batch from the input batches
- let compact_batch = make_queryable_batch("test_table", 0, 1, batches);
+ let batch = QueryAdaptor::new(
+ "test_table".into(),
+ PartitionId::new(1),
+ create_batches_with_influxtype_different_columns().await,
+ );
// verify PK
- let schema = compact_batch.schema();
+ let schema = batch.schema();
let pk = schema.primary_key();
let expected_pk = vec!["tag1", "tag2", "time"];
assert_eq!(expected_pk, pk);
- let sort_key = compute_sort_key(
- &schema,
- compact_batch.data.iter().map(|sb| sb.data.as_ref()),
- );
+ let sort_key =
+ compute_sort_key(&schema, batch.record_batches().iter().map(|rb| rb.as_ref()));
assert_eq!(sort_key, SortKey::from_columns(["tag1", "tag2", "time"]));
// compact
let exc = Executor::new(1);
- let stream = compact(&exc, compact_batch, sort_key).await.unwrap();
+ let stream = compact(&exc, Arc::new(batch), sort_key).await.unwrap();
let output_batches = datafusion::physical_plan::common::collect(stream)
.await
.unwrap();
@@ -774,85 +690,32 @@ mod tests {
#[tokio::test]
async fn test_compact_many_batches_different_columns_different_order_with_duplicates() {
// create many-batches input data
- let batches = create_batches_with_influxtype_different_columns_different_order().await;
-
- // build queryable batch from the input batches
- let compact_batch = make_queryable_batch("test_table", 0, 1, batches);
-
- // verify PK
- let schema = compact_batch.schema();
- let pk = schema.primary_key();
- let expected_pk = vec!["tag1", "tag2", "time"];
- assert_eq!(expected_pk, pk);
-
- let sort_key = compute_sort_key(
- &schema,
- compact_batch.data.iter().map(|sb| sb.data.as_ref()),
+ let batch = QueryAdaptor::new(
+ "test_table".into(),
+ PartitionId::new(1),
+ create_batches_with_influxtype_different_columns_different_order().await,
);
- assert_eq!(sort_key, SortKey::from_columns(["tag1", "tag2", "time"]));
-
- // compact
- let exc = Executor::new(1);
- let stream = compact(&exc, compact_batch, sort_key).await.unwrap();
- let output_batches = datafusion::physical_plan::common::collect(stream)
- .await
- .unwrap();
-
- // verify compacted data
- // data is sorted and all duplicates are removed
- // CORRECT RESULT
- let expected = vec![
- "+-----------+------+------+--------------------------------+",
- "| field_int | tag1 | tag2 | time |",
- "+-----------+------+------+--------------------------------+",
- "| 5 | | AL | 1970-01-01T00:00:00.000005Z |",
- "| 10 | | AL | 1970-01-01T00:00:00.000007Z |",
- "| 70 | | CT | 1970-01-01T00:00:00.000000100Z |",
- "| 1000 | | CT | 1970-01-01T00:00:00.000001Z |",
- "| 100 | | MA | 1970-01-01T00:00:00.000000050Z |",
- "| 10 | AL | MA | 1970-01-01T00:00:00.000000050Z |",
- "| 70 | CT | CT | 1970-01-01T00:00:00.000000100Z |",
- "| 70 | CT | CT | 1970-01-01T00:00:00.000000500Z |",
- "| 30 | MT | AL | 1970-01-01T00:00:00.000000005Z |",
- "| 20 | MT | AL | 1970-01-01T00:00:00.000007Z |",
- "| 1000 | MT | CT | 1970-01-01T00:00:00.000001Z |",
- "| 1000 | MT | CT | 1970-01-01T00:00:00.000002Z |",
- "+-----------+------+------+--------------------------------+",
- ];
-
- assert_batches_eq!(&expected, &output_batches);
- }
-
- // BUG
- #[tokio::test]
- async fn test_compact_many_batches_different_columns_different_order_with_duplicates2() {
- // create many-batches input data
- let batches = create_batches_with_influxtype_different_columns_different_order().await;
-
- // build queryable batch from the input batches
- let compact_batch = make_queryable_batch("test_table", 0, 1, batches);
// verify PK
- let schema = compact_batch.schema();
+ let schema = batch.schema();
let pk = schema.primary_key();
let expected_pk = vec!["tag1", "tag2", "time"];
assert_eq!(expected_pk, pk);
- let sort_key = compute_sort_key(
- &schema,
- compact_batch.data.iter().map(|sb| sb.data.as_ref()),
- );
+ let sort_key =
+ compute_sort_key(&schema, batch.record_batches().iter().map(|rb| rb.as_ref()));
assert_eq!(sort_key, SortKey::from_columns(["tag1", "tag2", "time"]));
// compact
let exc = Executor::new(1);
- let stream = compact(&exc, compact_batch, sort_key).await.unwrap();
+ let stream = compact(&exc, Arc::new(batch), sort_key).await.unwrap();
let output_batches = datafusion::physical_plan::common::collect(stream)
.await
.unwrap();
// verify compacted data
// data is sorted and all duplicates are removed
+ // CORRECT RESULT
let expected = vec![
"+-----------+------+------+--------------------------------+",
"| field_int | tag1 | tag2 | time |",
@@ -879,12 +742,13 @@ mod tests {
#[should_panic(expected = "Schemas compatible")]
async fn test_compact_many_batches_same_columns_different_types() {
// create many-batches input data
- let batches = create_batches_with_influxtype_same_columns_different_type().await;
-
- // build queryable batch from the input batches
- let compact_batch = make_queryable_batch("test_table", 0, 1, batches);
+ let batch = QueryAdaptor::new(
+ "test_table".into(),
+ PartitionId::new(1),
+ create_batches_with_influxtype_same_columns_different_type().await,
+ );
- // the schema merge will thorw a panic
- compact_batch.schema();
+ // the schema merge should throw a panic
+ batch.schema();
}
}
diff --git a/ingester/src/data.rs b/ingester/src/data.rs
index 0f0270d910..70131538b7 100644
--- a/ingester/src/data.rs
+++ b/ingester/src/data.rs
@@ -20,6 +20,7 @@ use parquet_file::{
storage::{ParquetStorage, StorageId},
};
use snafu::{OptionExt, Snafu};
+use uuid::Uuid;
use write_summary::ShardProgress;
use crate::{
@@ -29,9 +30,12 @@ use crate::{
pub(crate) mod namespace;
pub mod partition;
+mod sequence_range;
pub(crate) mod shard;
pub(crate) mod table;
+pub(crate) use sequence_range::*;
+
use self::{partition::resolver::PartitionProvider, shard::ShardData};
#[cfg(test)]
@@ -245,26 +249,32 @@ impl Persister for IngesterData {
) {
// lookup the state from the ingester data. If something isn't found,
// it's unexpected. Crash so someone can take a look.
- let shard_data = self
+ let namespace = self
.shards
.get(&shard_id)
- .unwrap_or_else(|| panic!("shard state for {shard_id} not in ingester data"));
- let namespace = shard_data
- .namespace_by_id(namespace_id)
+ .and_then(|s| s.namespace_by_id(namespace_id))
.unwrap_or_else(|| panic!("namespace {namespace_id} not in shard {shard_id} state"));
let namespace_name = namespace.namespace_name();
+ // Assert the namespace ID matches the index key.
+ assert_eq!(namespace.namespace_id(), namespace_id);
let table_data = namespace.table_id(table_id).unwrap_or_else(|| {
panic!("table {table_id} in namespace {namespace_id} not in shard {shard_id} state")
});
- let partition_key;
let table_name;
- let batch;
+ let partition_key;
let sort_key;
let last_persisted_sequence_number;
+ let batch;
+ let batch_sequence_number_range;
{
let mut guard = table_data.write().await;
+ // Assert various properties of the table to ensure the index is
+ // correct, out of an abundance of caution.
+ assert_eq!(guard.shard_id(), shard_id);
+ assert_eq!(guard.namespace_id(), namespace_id);
+ assert_eq!(guard.table_id(), table_id);
table_name = guard.table_name().clone();
let partition = guard.get_partition(partition_id).unwrap_or_else(|| {
@@ -273,12 +283,34 @@ impl Persister for IngesterData {
)
});
+ // Assert various properties of the partition to ensure the index is
+ // correct, out of an abundance of caution.
+ assert_eq!(partition.partition_id(), partition_id);
+ assert_eq!(partition.shard_id(), shard_id);
+ assert_eq!(partition.namespace_id(), namespace_id);
+ assert_eq!(partition.table_id(), table_id);
+ assert_eq!(*partition.table_name(), table_name);
+
partition_key = partition.partition_key().clone();
- batch = partition.snapshot_to_persisting_batch();
sort_key = partition.sort_key().clone();
last_persisted_sequence_number = partition.max_persisted_sequence_number();
+
+ // The sequence number MUST be read without releasing the write lock
+ // to ensure a consistent snapshot of batch contents and batch
+ // sequence number range.
+ batch = partition.mark_persisting();
+ batch_sequence_number_range = partition.sequence_number_range();
};
+ // From this point on, the code MUST be infallible.
+ //
+ // The partition data was moved to the persisting slot, and any
+ // subsequent calls would be an error.
+ //
+ // This is NOT an invariant, and this could be changed in the future to
+ // allow partitions to be marked as persisting repeatedly. Today however,
+ // the code is infallible (or rather, terminal - it does cause a retry).
+
let sort_key = sort_key.get().await;
trace!(
%shard_id,
@@ -306,8 +338,13 @@ impl Persister for IngesterData {
// Check if there is any data to persist.
let batch = match batch {
- Some(v) if !v.data.data.is_empty() => v,
- _ => {
+ Some(v) => {
+ // The partition state machine will NOT return an empty batch.
+ assert!(!v.record_batches().is_empty());
+ v
+ }
+ None => {
+ // But it MAY return no batch at all.
warn!(
%shard_id,
%namespace_id,
@@ -322,17 +359,6 @@ impl Persister for IngesterData {
}
};
- assert_eq!(batch.shard_id(), shard_id);
- assert_eq!(batch.table_id(), table_id);
- assert_eq!(batch.partition_id(), partition_id);
-
- // Read the maximum SequenceNumber in the batch.
- let (_min, max_sequence_number) = batch.data.min_max_sequence_numbers();
-
- // Read the future object store ID before passing the batch into
- // compaction, instead of retaining a copy of the data post-compaction.
- let object_store_id = batch.object_store_id();
-
// do the CPU intensive work of compaction, de-duplication and sorting
let CompactedStream {
stream: record_stream,
@@ -342,6 +368,10 @@ impl Persister for IngesterData {
.await
.expect("unable to compact persisting batch");
+ // Generate a UUID to uniquely identify this parquet file in object
+ // storage.
+ let object_store_id = Uuid::new_v4();
+
// Construct the metadata for this parquet file.
let iox_metadata = IoxMetadata {
object_store_id,
@@ -353,7 +383,7 @@ impl Persister for IngesterData {
table_name: Arc::clone(&*table_name),
partition_id,
partition_key: partition_key.clone(),
- max_sequence_number,
+ max_sequence_number: batch_sequence_number_range.inclusive_max().unwrap(),
compaction_level: CompactionLevel::Initial,
sort_key: Some(data_sort_key),
};
@@ -503,15 +533,28 @@ impl Persister for IngesterData {
.recorder(attributes)
.record(file_size as u64);
- // and remove the persisted data from memory
- namespace
- .mark_persisted(
- &table_name,
- &partition_key,
- iox_metadata.max_sequence_number,
- )
- .await;
- debug!(
+ // Mark the partition as having completed persistence, causing it to
+ // release the reference to the in-flight persistence data it is
+ // holding.
+ //
+ // This SHOULD cause the data to be dropped, but there MAY be ongoing
+ // queries that currently hold a reference to the data. In either case,
+ // the persisted data will be dropped "shortly".
+ table_data
+ .write()
+ .await
+ .get_partition(partition_id)
+ .unwrap()
+ .mark_persisted(iox_metadata.max_sequence_number);
+
+ // BUG: ongoing queries retain references to the persisting data,
+ // preventing it from being dropped, but memory is released back to
+ // lifecycle memory tracker when this fn returns.
+ //
+ // https://github.com/influxdata/influxdb_iox/issues/5805
+ //
+
+ info!(
%object_store_id,
%shard_id,
%namespace_id,
@@ -521,7 +564,7 @@ impl Persister for IngesterData {
%partition_id,
%partition_key,
max_sequence_number=%iox_metadata.max_sequence_number.get(),
- "marked partition as persisted"
+ "persisted partition"
);
}
@@ -656,8 +699,21 @@ mod tests {
.await
.unwrap();
assert_matches!(action, DmlApplyAction::Applied(false));
+
+ let w2 = DmlWrite::new(
+ "foo",
+ lines_to_batches("mem foo=1 10", 0).unwrap(),
+ Some("1970-01-01".into()),
+ DmlMeta::sequenced(
+ Sequence::new(ShardIndex::new(1), SequenceNumber::new(2)),
+ ignored_ts,
+ None,
+ 50,
+ ),
+ );
+
let action = data
- .buffer_operation(shard1.id, DmlOperation::Write(w1), &manager.handle())
+ .buffer_operation(shard1.id, DmlOperation::Write(w2), &manager.handle())
.await
.unwrap();
assert_matches!(action, DmlApplyAction::Applied(true));
@@ -1016,11 +1072,15 @@ mod tests {
assert_eq!(buckets_with_counts, &[500 * 1024]);
let mem_table = n.table_data(&"mem".into()).unwrap();
- let mem_table = mem_table.read().await;
// verify that the parquet_max_sequence_number got updated
assert_eq!(
- mem_table.parquet_max_sequence_number(),
+ mem_table
+ .write()
+ .await
+ .get_partition(partition_id)
+ .unwrap()
+ .max_persisted_sequence_number(),
Some(SequenceNumber::new(2))
);
@@ -1310,13 +1370,17 @@ mod tests {
.unwrap();
{
let table_data = data.table_data(&"mem".into()).unwrap();
- let table = table_data.read().await;
- let p = table.get_partition_by_key(&"1970-01-01".into()).unwrap();
+ let mut table = table_data.write().await;
+ assert!(table
+ .partition_iter_mut()
+ .all(|p| p.get_query_data().is_none()));
assert_eq!(
- p.max_persisted_sequence_number(),
+ table
+ .get_partition_by_key(&"1970-01-01".into())
+ .unwrap()
+ .max_persisted_sequence_number(),
Some(SequenceNumber::new(1))
);
- assert!(p.data.buffer.is_none());
}
assert_matches!(action, DmlApplyAction::Skipped);
@@ -1329,8 +1393,8 @@ mod tests {
let table = table_data.read().await;
let partition = table.get_partition_by_key(&"1970-01-01".into()).unwrap();
assert_eq!(
- partition.data.buffer.as_ref().unwrap().min_sequence_number,
- SequenceNumber::new(2)
+ partition.sequence_number_range().inclusive_min(),
+ Some(SequenceNumber::new(2))
);
assert_matches!(data.table_count().observe(), Observation::U64Counter(v) => {
diff --git a/ingester/src/data/namespace.rs b/ingester/src/data/namespace.rs
index 500345dcf3..74d6449f9d 100644
--- a/ingester/src/data/namespace.rs
+++ b/ingester/src/data/namespace.rs
@@ -2,7 +2,7 @@
use std::{collections::HashMap, sync::Arc};
-use data_types::{NamespaceId, PartitionKey, SequenceNumber, ShardId, TableId};
+use data_types::{NamespaceId, SequenceNumber, ShardId, TableId};
use dml::DmlOperation;
use iox_catalog::interface::Catalog;
use metric::U64Counter;
@@ -253,52 +253,7 @@ impl NamespaceData {
}
}
- /// Snapshots the mutable buffer for the partition, which clears it out and moves it over to
- /// snapshots. Then return a vec of the snapshots and the optional persisting batch.
- #[cfg(test)] // Only used in tests
- pub(crate) async fn snapshot(
- &self,
- table_name: &TableName,
- partition_key: &PartitionKey,
- ) -> Option<(
- Vec<Arc<super::partition::SnapshotBatch>>,
- Option<Arc<super::partition::PersistingBatch>>,
- )> {
- if let Some(t) = self.table_data(table_name) {
- let mut t = t.write().await;
-
- return t.get_partition_by_key_mut(partition_key).map(|p| {
- p.data
- .generate_snapshot()
- .expect("snapshot on mutable batch should never fail");
- (p.data.snapshots.to_vec(), p.data.persisting.clone())
- });
- }
-
- None
- }
-
- /// Snapshots the mutable buffer for the partition, which clears it out and then moves all
- /// snapshots over to a persisting batch, which is returned. If there is no data to snapshot
- /// or persist, None will be returned.
- #[cfg(test)] // Only used in tests
- pub(crate) async fn snapshot_to_persisting(
- &self,
- table_name: &TableName,
- partition_key: &PartitionKey,
- ) -> Option<Arc<super::partition::PersistingBatch>> {
- if let Some(table_data) = self.table_data(table_name) {
- let mut table_data = table_data.write().await;
-
- return table_data
- .get_partition_by_key_mut(partition_key)
- .and_then(|partition_data| partition_data.snapshot_to_persisting_batch());
- }
-
- None
- }
-
- /// Gets the buffered table data
+ /// Return the specified [`TableData`] if it exists.
pub(crate) fn table_data(
&self,
table_name: &TableName,
@@ -353,30 +308,11 @@ impl NamespaceData {
})
}
- /// Walks down the table and partition and clears the persisting batch. The sequence number is
- /// the max_sequence_number for the persisted parquet file, which should be kept in the table
- /// data buffer.
- pub(super) async fn mark_persisted(
- &self,
- table_name: &TableName,
- partition_key: &PartitionKey,
- sequence_number: SequenceNumber,
- ) {
- if let Some(t) = self.table_data(table_name) {
- let mut t = t.write().await;
- let partition = t.get_partition_by_key_mut(partition_key);
-
- if let Some(p) = partition {
- p.mark_persisted(sequence_number);
- }
- }
- }
-
/// Return progress from this Namespace
pub(super) async fn progress(&self) -> ShardProgress {
let tables: Vec<_> = self.tables.read().by_id.values().map(Arc::clone).collect();
- // Consolidate progtress across partitions.
+ // Consolidate progress across partitions.
let mut progress = ShardProgress::new()
// Properly account for any sequence number that is
// actively buffering and thus not yet completely
@@ -442,7 +378,7 @@ impl<'a> Drop for ScopedSequenceNumber<'a> {
mod tests {
use std::sync::Arc;
- use data_types::{PartitionId, ShardIndex};
+ use data_types::{PartitionId, PartitionKey, ShardIndex};
use metric::{Attributes, Metric};
use crate::{
diff --git a/ingester/src/data/partition.rs b/ingester/src/data/partition.rs
index 1333849165..d6b0ded343 100644
--- a/ingester/src/data/partition.rs
+++ b/ingester/src/data/partition.rs
@@ -2,125 +2,23 @@
use std::sync::Arc;
-use arrow::record_batch::RecordBatch;
use data_types::{NamespaceId, PartitionId, PartitionKey, SequenceNumber, ShardId, TableId};
use mutable_batch::MutableBatch;
use observability_deps::tracing::*;
-use schema::{selection::Selection, sort::SortKey};
-use snafu::ResultExt;
-use uuid::Uuid;
+use schema::sort::SortKey;
use write_summary::ShardProgress;
use self::{
- buffer::{BufferBatch, DataBuffer},
+ buffer::{traits::Queryable, BufferState, DataBuffer, Persisting},
resolver::DeferredSortKey,
};
-use crate::{querier_handler::PartitionStatus, query::QueryableBatch};
+use crate::query_adaptor::QueryAdaptor;
-use super::table::TableName;
+use super::{sequence_range::SequenceNumberRange, table::TableName};
mod buffer;
pub mod resolver;
-/// Read only copy of the unpersisted data for a partition in the ingester for a specific partition.
-#[derive(Debug)]
-pub(crate) struct UnpersistedPartitionData {
- pub(crate) partition_id: PartitionId,
- pub(crate) non_persisted: Vec<Arc<SnapshotBatch>>,
- pub(crate) persisting: Option<QueryableBatch>,
- pub(crate) partition_status: PartitionStatus,
-}
-
-/// PersistingBatch contains all needed info and data for creating
-/// a parquet file for given set of SnapshotBatches
-#[derive(Debug, PartialEq, Clone)]
-pub(crate) struct PersistingBatch {
- /// Shard id of the data
- pub(crate) shard_id: ShardId,
-
- /// Table id of the data
- pub(crate) table_id: TableId,
-
- /// Partition Id of the data
- pub(crate) partition_id: PartitionId,
-
- /// Id of to-be-created parquet file of this data
- pub(crate) object_store_id: Uuid,
-
- /// data
- pub(crate) data: Arc<QueryableBatch>,
-}
-
-impl PersistingBatch {
- pub(crate) fn object_store_id(&self) -> Uuid {
- self.object_store_id
- }
-
- pub(crate) fn shard_id(&self) -> ShardId {
- self.shard_id
- }
-
- pub(crate) fn table_id(&self) -> TableId {
- self.table_id
- }
-
- pub(crate) fn partition_id(&self) -> PartitionId {
- self.partition_id
- }
-}
-
-/// SnapshotBatch contains data of many contiguous BufferBatches
-#[derive(Debug, PartialEq)]
-pub(crate) struct SnapshotBatch {
- /// Min sequence number of its combined BufferBatches
- pub(crate) min_sequence_number: SequenceNumber,
- /// Max sequence number of its combined BufferBatches
- pub(crate) max_sequence_number: SequenceNumber,
- /// Data of its combined BufferBatches kept in one RecordBatch
- pub(crate) data: Arc<RecordBatch>,
-}
-
-impl SnapshotBatch {
- /// Return only data of the given columns
- pub(crate) fn scan(
- &self,
- selection: Selection<'_>,
- ) -> Result<Option<Arc<RecordBatch>>, super::Error> {
- Ok(match selection {
- Selection::All => Some(Arc::clone(&self.data)),
- Selection::Some(columns) => {
- let schema = self.data.schema();
-
- let indices = columns
- .iter()
- .filter_map(|&column_name| {
- match schema.index_of(column_name) {
- Ok(idx) => Some(idx),
- _ => None, // this batch does not include data of this column_name
- }
- })
- .collect::<Vec<_>>();
- if indices.is_empty() {
- None
- } else {
- Some(Arc::new(
- self.data
- .project(&indices)
- .context(super::FilterColumnSnafu {})?,
- ))
- }
- }
- })
- }
-
- /// Return progress in this data
- fn progress(&self) -> ShardProgress {
- ShardProgress::new()
- .with_buffered(self.min_sequence_number)
- .with_buffered(self.max_sequence_number)
- }
-}
-
/// The load state of the [`SortKey`] for a given partition.
#[derive(Debug, Clone)]
pub(crate) enum SortKeyState {
@@ -146,7 +44,7 @@ impl SortKeyState {
#[derive(Debug)]
pub struct PartitionData {
/// The catalog ID of the partition this buffer is for.
- id: PartitionId,
+ partition_id: PartitionId,
/// The string partition key for this partition.
partition_key: PartitionKey,
@@ -168,7 +66,11 @@ pub struct PartitionData {
/// The name of the table this partition is part of.
table_name: TableName,
- pub(super) data: DataBuffer,
+ /// A buffer for incoming writes.
+ buffer: DataBuffer,
+
+ /// The buffered data currently being persisted, if any.
+ persisting: Option<BufferState<Persisting>>,
/// The max_persisted_sequence number for any parquet_file in this
/// partition.
@@ -189,92 +91,249 @@ impl PartitionData {
max_persisted_sequence_number: Option<SequenceNumber>,
) -> Self {
Self {
- id,
+ partition_id: id,
partition_key,
sort_key,
shard_id,
namespace_id,
table_id,
table_name,
- data: Default::default(),
+ buffer: DataBuffer::default(),
+ persisting: None,
max_persisted_sequence_number,
}
}
- /// Snapshot anything in the buffer and move all snapshot data into a persisting batch
- pub(super) fn snapshot_to_persisting_batch(&mut self) -> Option<Arc<PersistingBatch>> {
- self.data
- .snapshot_to_persisting(self.shard_id, self.table_id, self.id, &self.table_name)
- }
-
- /// Snapshot whatever is in the buffer and return a new vec of the
- /// arc cloned snapshots
- #[cfg(test)]
- fn snapshot(&mut self) -> Result<Vec<Arc<SnapshotBatch>>, super::Error> {
- self.data
- .generate_snapshot()
- .context(super::SnapshotSnafu)?;
- Ok(self.data.get_snapshots().to_vec())
- }
-
- /// Return non persisting data
- pub(super) fn get_non_persisting_data(&self) -> Result<Vec<Arc<SnapshotBatch>>, super::Error> {
- self.data.buffer_and_snapshots()
- }
-
- /// Return persisting data
- pub(super) fn get_persisting_data(&self) -> Option<QueryableBatch> {
- self.data.get_persisting_data()
- }
-
- /// Write the given mb in the buffer
+ /// Buffer the given [`MutableBatch`] in memory, ordered by the specified
+ /// [`SequenceNumber`].
+ ///
+ /// # Panics
+ ///
+ /// This method panics if `sequence_number` is not strictly greater than
+ /// previous calls or the persisted maximum.
pub(super) fn buffer_write(
&mut self,
- sequence_number: SequenceNumber,
mb: MutableBatch,
+ sequence_number: SequenceNumber,
) -> Result<(), super::Error> {
- let (min_sequence_number, max_sequence_number) = match &mut self.data.buffer {
- Some(buf) => {
- buf.max_sequence_number = sequence_number.max(buf.max_sequence_number);
- buf.data.extend_from(&mb).context(super::BufferWriteSnafu)?;
- (buf.min_sequence_number, buf.max_sequence_number)
- }
- None => {
- self.data.buffer = Some(BufferBatch {
- min_sequence_number: sequence_number,
- max_sequence_number: sequence_number,
- data: mb,
- });
- (sequence_number, sequence_number)
- }
- };
+ // Ensure that this write is strictly after any persisted ops.
+ if let Some(min) = self.max_persisted_sequence_number {
+ assert!(sequence_number > min, "monotonicity violation");
+ }
+
+ // Buffer the write, which ensures monotonicity of writes within the
+ // buffer itself.
+ self.buffer
+ .buffer_write(mb, sequence_number)
+ .map_err(|e| super::Error::BufferWrite { source: e })?;
+
trace!(
- min_sequence_number=?min_sequence_number,
- max_sequence_number=?max_sequence_number,
+ shard_id = %self.shard_id,
+ namespace_id = %self.namespace_id,
+ table_id = %self.table_id,
+ table_name = %self.table_name,
+ partition_id = %self.partition_id,
+ partition_key = %self.partition_key,
+ min_sequence_number=?self.buffer.sequence_number_range().inclusive_min(),
+ max_sequence_number=?self.buffer.sequence_number_range().inclusive_max(),
"buffered write"
);
Ok(())
}
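As a minimal illustration of the monotonicity contract above (assuming `p` is a `PartitionData` and reusing the `lp_to_mutable_batch` test helper), sequence numbers must strictly increase across writes to the same partition:

let mb = lp_to_mutable_batch(r#"bananas x=1 10"#).1;
p.buffer_write(mb.clone(), SequenceNumber::new(7))
    .expect("write should succeed");
p.buffer_write(mb.clone(), SequenceNumber::new(8))
    .expect("write should succeed");
// A further call with SequenceNumber::new(8) or lower panics with
// "monotonicity violation".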
+ /// Return all data for this partition, ordered by the [`SequenceNumber`]
+ /// with which it was buffered.
+ pub(crate) fn get_query_data(&mut self) -> Option<QueryAdaptor> {
+ // Extract the buffered data, if any.
+ let buffered_data = self.buffer.get_query_data();
+
+ // Prepend any currently persisting batches.
+ //
+ // The persisting RecordBatch instances MUST be ordered before the
+ // buffered data to preserve the ordering of writes such that updates to
+ // existing rows materialise to the correct output.
+ let data = self
+ .persisting
+ .iter()
+ .flat_map(|b| b.get_query_data())
+ .chain(buffered_data)
+ .collect::<Vec<_>>();
+
+ trace!(
+ shard_id = %self.shard_id,
+ namespace_id = %self.namespace_id,
+ table_id = %self.table_id,
+ table_name = %self.table_name,
+ partition_id = %self.partition_id,
+ partition_key = %self.partition_key,
+ min_sequence_number=?self.buffer.sequence_number_range().inclusive_min(),
+ max_sequence_number=?self.buffer.sequence_number_range().inclusive_max(),
+ max_persisted=?self.max_persisted_sequence_number(),
+ n_batches = data.len(),
+ "read partition data"
+ );
+
+ if data.is_empty() {
+ return None;
+ }
+
+ // Construct the query adaptor over the partition data.
+ //
+ // `data` MUST contain at least one row, or the constructor panics. This
+ // is upheld by the FSM, which ensures only non-empty snapshots /
+ // RecordBatch are generated. Because `data` contains at least one
+ // RecordBatch, this invariant holds.
+ Some(QueryAdaptor::new(
+ self.table_name.clone(),
+ self.partition_id,
+ data,
+ ))
+ }
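A short usage sketch of the read path, again assuming a mutable `PartitionData` handle: the persisting-before-buffer ordering established above is what allows downstream deduplication to keep the newest value for each key.

if let Some(data) = p.get_query_data() {
    // Batches are ordered oldest-to-newest: persisting snapshots first,
    // followed by the currently buffered writes.
    for rb in data.record_batches() {
        println!("batch with {} rows", rb.num_rows());
    }
}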
+
+ /// Return the range of [`SequenceNumber`] currently queryable by calling
+ /// [`PartitionData::get_query_data()`].
+ ///
+ /// This includes buffered data, snapshots, and currently persisting data.
+ pub(super) fn sequence_number_range(&self) -> SequenceNumberRange {
+ self.persisting
+ .as_ref()
+ .map(|v| v.sequence_number_range().clone())
+ .unwrap_or_default()
+ .merge(self.buffer.sequence_number_range())
+ }
+
/// Return the progress from this Partition
pub(super) fn progress(&self) -> ShardProgress {
- self.data.progress()
- }
+ let mut p = ShardProgress::default();
+
+ let range = self.buffer.sequence_number_range();
+ // Observe both the min & max, as the ShardProgress tracks both.
+ if let Some(v) = range.inclusive_min() {
+ p = p.with_buffered(v);
+ p = p.with_buffered(range.inclusive_max().unwrap());
+ }
+
+ // Observe the buffered state, if any.
+ if let Some(range) = self.persisting.as_ref().map(|p| p.sequence_number_range()) {
+ // Observe both the min & max, as the ShardProgress tracks both.
+ //
+ // All persisting batches MUST contain data. This is an invariant
+ // upheld by the state machine.
+ p = p.with_buffered(range.inclusive_min().unwrap());
+ p = p.with_buffered(range.inclusive_max().unwrap());
+ }
+
+ // And finally report the persist watermark for this partition.
+ if let Some(v) = self.max_persisted_sequence_number() {
+ p = p.with_persisted(v)
+ }
- pub(super) fn partition_id(&self) -> PartitionId {
- self.id
+ trace!(
+ shard_id = %self.shard_id,
+ namespace_id = %self.namespace_id,
+ table_id = %self.table_id,
+ table_name = %self.table_name,
+ partition_id = %self.partition_id,
+ partition_key = %self.partition_key,
+ progress = ?p,
+ "progress query"
+ );
+
+ p
}
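The reported progress is typically interrogated per sequence number, mirroring the assertions in the tests further below; a sketch continuing the example above:

let progress = p.progress();
assert!(progress.readable(SequenceNumber::new(8)));
assert!(!progress.persisted(SequenceNumber::new(8)));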
- /// Return the [`SequenceNumber`] that forms the (inclusive) persistence
- /// watermark for this partition.
- pub(crate) fn max_persisted_sequence_number(&self) -> Option<SequenceNumber> {
- self.max_persisted_sequence_number
+ /// Snapshot and mark all buffered data as persisting.
+ ///
+ /// This method returns [`None`] if no data is buffered in [`Self`].
+ ///
+ /// A reference to the persisting data is retained until a corresponding
+ /// call to [`Self::mark_persisted()`] is made to release it.
+ ///
+ /// # Panics
+ ///
+ /// This method panics if [`Self`] already has data from an ongoing persist
+ /// operation. All calls to [`Self::mark_persisting()`] must be followed by
+ /// a matching call to [`Self::mark_persisted()`] before a new persist can
+ /// begin.
+ pub(super) fn mark_persisting(&mut self) -> Option<QueryAdaptor> {
+ // Assert that there is at most one persist operation per partition
+ // ongoing at any one time.
+ //
+ // This is not a system invariant; however, the system MUST make
+ // persisted partitions visible in monotonic order w.r.t. their sequence
+ // numbers.
+ assert!(
+ self.persisting.is_none(),
+ "starting persistence on partition in persisting state"
+ );
+
+ let persisting = std::mem::take(&mut self.buffer).into_persisting()?;
+
+ // From this point on, all code MUST be infallible or the buffered data
+ // contained within persisting may be dropped.
+
+ debug!(
+ shard_id = %self.shard_id,
+ namespace_id = %self.namespace_id,
+ table_id = %self.table_id,
+ table_name = %self.table_name,
+ partition_id = %self.partition_id,
+ partition_key = %self.partition_key,
+ current_max_persisted_sequence_number = ?self.max_persisted_sequence_number,
+ persisting_min_sequence_number = ?persisting.sequence_number_range().inclusive_min(),
+ persisting_max_sequence_number = ?persisting.sequence_number_range().inclusive_max(),
+ "marking partition as persisting"
+ );
+
+ let data = persisting.get_query_data();
+ self.persisting = Some(persisting);
+
+ Some(QueryAdaptor::new(
+ self.table_name.clone(),
+ self.partition_id,
+ data,
+ ))
}
/// Mark this partition as having completed persistence up to, and
/// including, the specified [`SequenceNumber`].
+ ///
+ /// All references to the actively persisting data are released.
+ ///
+ /// # Panics
+ ///
+ /// This method panics if [`Self`] is not marked as undergoing a persist
+ /// operation. All calls to [`Self::mark_persisted()`] must be preceded by a
+ /// matching call to [`Self::mark_persisting()`].
pub(super) fn mark_persisted(&mut self, sequence_number: SequenceNumber) {
+ // Assert there is a batch marked as persisting in self, that it has a
+ // non-empty sequence number range, and that the persisted upper bound
+ // matches the data in the batch being dropped.
+ //
+ // TODO: once this has been deployed without issue (the assert does not
+ // fire), passing the sequence number is redundant and can be removed.
+ let persisting_max = self
+ .persisting
+ .as_ref()
+ .expect("must be a persisting batch when marking complete")
+ .sequence_number_range()
+ .inclusive_max()
+ .expect("persisting batch must contain sequence numbers");
+ assert_eq!(
+ persisting_max, sequence_number,
+ "marking {:?} as persisted but persisting batch max is {:?}",
+ sequence_number, persisting_max
+ );
+
+ // Additionally assert the persisting batch is ordered strictly before
+ // the data in the buffer, if any.
+ //
+ // This asserts writes are monotonically applied.
+ if let Some(buffer_min) = self.buffer.sequence_number_range().inclusive_min() {
+ assert!(persisting_max < buffer_min, "monotonicity violation");
+ }
+
// It is an invariant that partitions are persisted in order so that
// both the per-shard, and per-partition watermarks are correctly
// advanced and accurate.
@@ -288,35 +347,53 @@ impl PartitionData {
}
self.max_persisted_sequence_number = Some(sequence_number);
- self.data.mark_persisted();
+ self.persisting = None;
+
+ debug!(
+ shard_id = %self.shard_id,
+ namespace_id = %self.namespace_id,
+ table_id = %self.table_id,
+ table_name = %self.table_name,
+ partition_id = %self.partition_id,
+ partition_key = %self.partition_key,
+ current_max_persisted_sequence_number = ?self.max_persisted_sequence_number,
+ "marking partition persistence complete"
+ );
+ }
+
+ pub(crate) fn partition_id(&self) -> PartitionId {
+ self.partition_id
+ }
+
+ /// Return the [`SequenceNumber`] that forms the (inclusive) persistence
+ /// watermark for this partition.
+ pub(crate) fn max_persisted_sequence_number(&self) -> Option<SequenceNumber> {
+ self.max_persisted_sequence_number
}
/// Return the name of the table this [`PartitionData`] is buffering writes
/// for.
- #[cfg(test)]
- pub(crate) fn table_name(&self) -> &str {
- self.table_name.as_ref()
+ pub(crate) fn table_name(&self) -> &TableName {
+ &self.table_name
}
/// Return the shard ID for this partition.
- #[cfg(test)]
pub(crate) fn shard_id(&self) -> ShardId {
self.shard_id
}
/// Return the table ID for this partition.
- #[cfg(test)]
pub(crate) fn table_id(&self) -> TableId {
self.table_id
}
/// Return the partition key for this partition.
- pub fn partition_key(&self) -> &PartitionKey {
+ pub(crate) fn partition_key(&self) -> &PartitionKey {
&self.partition_key
}
/// Return the [`NamespaceId`] this partition is a part of.
- pub fn namespace_id(&self) -> NamespaceId {
+ pub(crate) fn namespace_id(&self) -> NamespaceId {
self.namespace_id
}
@@ -338,190 +415,467 @@ impl PartitionData {
#[cfg(test)]
mod tests {
- use std::time::Duration;
+ use std::{ops::Deref, time::Duration};
- use arrow_util::assert_batches_sorted_eq;
+ use arrow::compute::SortOptions;
+ use arrow_util::assert_batches_eq;
use assert_matches::assert_matches;
use backoff::BackoffConfig;
use data_types::ShardIndex;
+ use datafusion::{
+ physical_expr::PhysicalSortExpr,
+ physical_plan::{expressions::col, memory::MemoryExec, ExecutionPlan},
+ };
+ use datafusion_util::test_collect;
use iox_catalog::interface::Catalog;
+ use iox_query::QueryChunk;
+ use lazy_static::lazy_static;
use mutable_batch_lp::test_helpers::lp_to_mutable_batch;
use crate::test_util::populate_catalog;
use super::*;
- #[test]
- fn snapshot_buffer_different_but_compatible_schemas() {
- let mut partition_data = PartitionData::new(
- PartitionId::new(1),
- "bananas".into(),
- ShardId::new(1),
- NamespaceId::new(42),
- TableId::new(1),
- "foo".into(),
+ const PARTITION_ID: PartitionId = PartitionId::new(1);
+
+ lazy_static! {
+ static ref PARTITION_KEY: PartitionKey = PartitionKey::from("platanos");
+ static ref TABLE_NAME: TableName = TableName::from("bananas");
+ }
+
+ // Write some data and read it back from the buffer.
+ //
+ // This ensures the sequence range, progress API, buffering, snapshot
+ // generation & query all work as intended.
+ #[tokio::test]
+ async fn test_write_read() {
+ let mut p = PartitionData::new(
+ PARTITION_ID,
+ PARTITION_KEY.clone(),
+ ShardId::new(2),
+ NamespaceId::new(3),
+ TableId::new(4),
+ TABLE_NAME.clone(),
SortKeyState::Provided(None),
None,
);
- let seq_num1 = SequenceNumber::new(1);
- // Missing tag `t1`
- let (_, mut mutable_batch1) =
- lp_to_mutable_batch(r#"foo iv=1i,uv=774u,fv=1.0,bv=true,sv="hi" 1"#);
- partition_data
- .buffer_write(seq_num1, mutable_batch1.clone())
- .unwrap();
+ // No writes should report no sequence offsets.
+ {
+ let range = p.sequence_number_range();
+ assert_eq!(range.inclusive_min(), None);
+ assert_eq!(range.inclusive_max(), None);
+ }
- let seq_num2 = SequenceNumber::new(2);
- // Missing field `iv`
- let (_, mutable_batch2) =
- lp_to_mutable_batch(r#"foo,t1=aoeu uv=1u,fv=12.0,bv=false,sv="bye" 10000"#);
+ // The progress API should indicate there is no progress status.
+ assert!(p.progress().is_empty());
- partition_data
- .buffer_write(seq_num2, mutable_batch2.clone())
- .unwrap();
- partition_data.data.generate_snapshot().unwrap();
+ // And no data should be returned when queried.
+ assert!(p.get_query_data().is_none());
- assert!(partition_data.data.buffer.is_none());
- assert_eq!(partition_data.data.snapshots.len(), 1);
+ // Perform a single write.
+ let mb = lp_to_mutable_batch(r#"bananas,city=London people=2,pigeons="millions" 10"#).1;
+ p.buffer_write(mb, SequenceNumber::new(1))
+ .expect("write should succeed");
- let snapshot = &partition_data.data.snapshots[0];
- assert_eq!(snapshot.min_sequence_number, seq_num1);
- assert_eq!(snapshot.max_sequence_number, seq_num2);
+ // The sequence range should now cover the single write.
+ {
+ let range = p.sequence_number_range();
+ assert_eq!(range.inclusive_min(), Some(SequenceNumber::new(1)));
+ assert_eq!(range.inclusive_max(), Some(SequenceNumber::new(1)));
+ }
+
+ // The progress API should indicate there is some data buffered, but not
+ // persisted.
+ {
+ let progress = p.progress();
+ assert!(progress.readable(SequenceNumber::new(1)));
+ assert!(!progress.persisted(SequenceNumber::new(1)));
+ }
+
+ // The data should be readable.
+ {
+ let data = p.get_query_data().expect("should return data");
+ assert_eq!(data.partition_id(), PARTITION_ID);
+ assert_eq!(data.table_name(), TABLE_NAME.to_string());
+
+ let expected = [
+ "+--------+--------+----------+--------------------------------+",
+ "| city | people | pigeons | time |",
+ "+--------+--------+----------+--------------------------------+",
+ "| London | 2 | millions | 1970-01-01T00:00:00.000000010Z |",
+ "+--------+--------+----------+--------------------------------+",
+ ];
+ assert_batches_eq!(
+ expected,
+ &*data
+ .record_batches()
+ .iter()
+ .map(Deref::deref)
+ .cloned()
+ .collect::<Vec<_>>()
+ );
+ }
+
+ // Perform another write, adding data to the existing queryable data
+ // snapshot.
+ let mb = lp_to_mutable_batch(r#"bananas,city=Madrid people=4,pigeons="none" 20"#).1;
+ p.buffer_write(mb, SequenceNumber::new(2))
+ .expect("write should succeed");
+
+ // The sequence range should now cover both writes.
+ {
+ let range = p.sequence_number_range();
+ assert_eq!(range.inclusive_min(), Some(SequenceNumber::new(1)));
+ assert_eq!(range.inclusive_max(), Some(SequenceNumber::new(2)));
+ }
+
+ // The progress API should indicate there is more data buffered, but not
+ // persisted.
+ {
+ let progress = p.progress();
+ assert!(progress.readable(SequenceNumber::new(1)));
+ assert!(progress.readable(SequenceNumber::new(2)));
+ assert!(!progress.persisted(SequenceNumber::new(1)));
+ assert!(!progress.persisted(SequenceNumber::new(2)));
+ }
- mutable_batch1.extend_from(&mutable_batch2).unwrap();
- let combined_record_batch = mutable_batch1.to_arrow(Selection::All).unwrap();
- assert_eq!(&*snapshot.data, &combined_record_batch);
+ // And finally both writes should be readable.
+ {
+ let data = p.get_query_data().expect("should contain data");
+ assert_eq!(data.partition_id(), PARTITION_ID);
+ assert_eq!(data.table_name(), TABLE_NAME.to_string());
+
+ let expected = [
+ "+--------+--------+----------+--------------------------------+",
+ "| city | people | pigeons | time |",
+ "+--------+--------+----------+--------------------------------+",
+ "| London | 2 | millions | 1970-01-01T00:00:00.000000010Z |",
+ "| Madrid | 4 | none | 1970-01-01T00:00:00.000000020Z |",
+ "+--------+--------+----------+--------------------------------+",
+ ];
+ assert_batches_eq!(
+ expected,
+ &*data
+ .record_batches()
+ .iter()
+ .map(Deref::deref)
+ .cloned()
+ .collect::<Vec<_>>()
+ );
+ }
}
- // Test deletes mixed with writes on a single parittion
+ // Test persist operations against the partition, ensuring data is readable
+ // both before, during, and after a persist takes place.
#[tokio::test]
- async fn writes() {
- // Make a partition with empty DataBuffer
- let s_id = 1;
- let t_id = 1;
- let p_id = 1;
+ async fn test_persist() {
let mut p = PartitionData::new(
- PartitionId::new(p_id),
- "bananas".into(),
- ShardId::new(s_id),
- NamespaceId::new(42),
- TableId::new(t_id),
- "restaurant".into(),
+ PARTITION_ID,
+ PARTITION_KEY.clone(),
+ ShardId::new(2),
+ NamespaceId::new(3),
+ TableId::new(4),
+ TABLE_NAME.clone(),
SortKeyState::Provided(None),
None,
);
- // ------------------------------------------
- // Fill `buffer`
- // --- seq_num: 1
- let (_, mb) = lp_to_mutable_batch(r#"restaurant,city=Boston day="fri",temp=50 10"#);
- p.buffer_write(SequenceNumber::new(1), mb).unwrap();
+ assert!(p.max_persisted_sequence_number().is_none());
+ assert!(p.get_query_data().is_none());
+
+ // Perform a single write.
+ let mb = lp_to_mutable_batch(r#"bananas,city=London people=2,pigeons="millions" 10"#).1;
+ p.buffer_write(mb, SequenceNumber::new(1))
+ .expect("write should succeed");
+
+ // Begin persisting the partition.
+ let persisting_data = p.mark_persisting().expect("must contain existing data");
+ // And validate the data being persisted.
+ assert_eq!(persisting_data.partition_id(), PARTITION_ID);
+ assert_eq!(persisting_data.table_name(), TABLE_NAME.to_string());
+ assert_eq!(persisting_data.record_batches().len(), 1);
+ let expected = [
+ "+--------+--------+----------+--------------------------------+",
+ "| city | people | pigeons | time |",
+ "+--------+--------+----------+--------------------------------+",
+ "| London | 2 | millions | 1970-01-01T00:00:00.000000010Z |",
+ "+--------+--------+----------+--------------------------------+",
+ ];
+ assert_batches_eq!(
+ expected,
+ &*persisting_data
+ .record_batches()
+ .iter()
+ .map(Deref::deref)
+ .cloned()
+ .collect::<Vec<_>>()
+ );
- // --- seq_num: 2
- let (_, mb) = lp_to_mutable_batch(r#"restaurant,city=Andover day="thu",temp=44 15"#);
+ // The sequence range should now cover the single persisting write.
+ {
+ let range = p.sequence_number_range();
+ assert_eq!(range.inclusive_min(), Some(SequenceNumber::new(1)));
+ assert_eq!(range.inclusive_max(), Some(SequenceNumber::new(1)));
+ }
- p.buffer_write(SequenceNumber::new(2), mb).unwrap();
+ // The progress API should indicate there is some data buffered, but not
+ // yet persisted.
+ {
+ let progress = p.progress();
+ assert!(progress.readable(SequenceNumber::new(1)));
+ assert!(!progress.persisted(SequenceNumber::new(1)));
+ }
- // verify data
- assert_eq!(
- p.data.buffer.as_ref().unwrap().min_sequence_number,
- SequenceNumber::new(1)
- );
+ // And the max_persisted_sequence_number should not have changed.
+ assert!(p.max_persisted_sequence_number().is_none());
+
+ // Buffer another write during an ongoing persist.
+ let mb = lp_to_mutable_batch(r#"bananas,city=Madrid people=4,pigeons="none" 20"#).1;
+ p.buffer_write(mb, SequenceNumber::new(2))
+ .expect("write should succeed");
+
+ // Which must be readable, alongside the ongoing persist data.
+ {
+ let data = p.get_query_data().expect("must have data");
+ assert_eq!(data.partition_id(), PARTITION_ID);
+ assert_eq!(data.table_name(), TABLE_NAME.to_string());
+ assert_eq!(data.record_batches().len(), 2);
+ let expected = [
+ "+--------+--------+----------+--------------------------------+",
+ "| city | people | pigeons | time |",
+ "+--------+--------+----------+--------------------------------+",
+ "| London | 2 | millions | 1970-01-01T00:00:00.000000010Z |",
+ "| Madrid | 4 | none | 1970-01-01T00:00:00.000000020Z |",
+ "+--------+--------+----------+--------------------------------+",
+ ];
+ assert_batches_eq!(
+ expected,
+ &*data
+ .record_batches()
+ .iter()
+ .map(Deref::deref)
+ .cloned()
+ .collect::<Vec<_>>()
+ );
+ }
+
+ // The sequence range should still cover both writes.
+ {
+ let range = p.sequence_number_range();
+ assert_eq!(range.inclusive_min(), Some(SequenceNumber::new(1)));
+ assert_eq!(range.inclusive_max(), Some(SequenceNumber::new(2)));
+ }
+
+ // The progress API should indicate that both writes are still buffered.
+ {
+ let progress = p.progress();
+ assert!(progress.readable(SequenceNumber::new(1)));
+ assert!(progress.readable(SequenceNumber::new(2)));
+ assert!(!progress.persisted(SequenceNumber::new(1)));
+ assert!(!progress.persisted(SequenceNumber::new(2)));
+ }
+
+ // And the max_persisted_sequence_number should not have changed.
+ assert!(p.max_persisted_sequence_number().is_none());
+
+ // The persist now "completes".
+ p.mark_persisted(SequenceNumber::new(1));
+
+ // The sequence range should now cover only the second remaining
+ // buffered write.
+ {
+ let range = p.sequence_number_range();
+ assert_eq!(range.inclusive_min(), Some(SequenceNumber::new(2)));
+ assert_eq!(range.inclusive_max(), Some(SequenceNumber::new(2)));
+ }
+
+ // The progress API should indicate that the writes are readable
+ // (somewhere, not necessarily in the ingester), and the first write is
+ // persisted.
+ {
+ let progress = p.progress();
+ assert!(progress.readable(SequenceNumber::new(1)));
+ assert!(progress.readable(SequenceNumber::new(2)));
+ assert!(progress.persisted(SequenceNumber::new(1)));
+ assert!(!progress.persisted(SequenceNumber::new(2)));
+ }
+
+ // And the max_persisted_sequence_number should reflect the completed
+ // persist op.
assert_eq!(
- p.data.buffer.as_ref().unwrap().max_sequence_number,
- SequenceNumber::new(2)
+ p.max_persisted_sequence_number(),
+ Some(SequenceNumber::new(1))
);
- assert_eq!(p.data.snapshots.len(), 0);
- assert_eq!(p.data.persisting, None);
-
- // ------------------------------------------
- // Fill `buffer`
- // --- seq_num: 4
- let (_, mb) = lp_to_mutable_batch(
- r#"
- restaurant,city=Medford day="sun",temp=55 22
- restaurant,city=Boston day="sun",temp=57 24
- "#,
- );
- p.buffer_write(SequenceNumber::new(4), mb).unwrap();
- // --- seq_num: 5
- let (_, mb) = lp_to_mutable_batch(r#"restaurant,city=Andover day="tue",temp=56 30"#);
+ // Querying the buffer should now return only the second write.
+ {
+ let data = p.get_query_data().expect("must have data");
+ assert_eq!(data.partition_id(), PARTITION_ID);
+ assert_eq!(data.table_name(), TABLE_NAME.to_string());
+ assert_eq!(data.record_batches().len(), 1);
+ let expected = [
+ "+--------+--------+---------+--------------------------------+",
+ "| city | people | pigeons | time |",
+ "+--------+--------+---------+--------------------------------+",
+ "| Madrid | 4 | none | 1970-01-01T00:00:00.000000020Z |",
+ "+--------+--------+---------+--------------------------------+",
+ ];
+ assert_batches_eq!(
+ expected,
+ &*data
+ .record_batches()
+ .iter()
+ .map(Deref::deref)
+ .cloned()
+ .collect::<Vec<_>>()
+ );
+ }
+ }
- p.buffer_write(SequenceNumber::new(5), mb).unwrap();
+ // Ensure the ordering of snapshots & persisting data is preserved such that
+ // updates resolve correctly.
+ #[tokio::test]
+ async fn test_record_batch_ordering() {
+ // A helper function to dedupe the record batches in [`QueryAdaptor`]
+ // and assert the resulting batch contents.
+ async fn assert_deduped(expect: &[&str], batch: QueryAdaptor) {
+ let batch = batch
+ .record_batches()
+ .iter()
+ .map(Deref::deref)
+ .cloned()
+ .collect::<Vec<_>>();
+
+ let sort_keys = vec![PhysicalSortExpr {
+ expr: col("time", &batch[0].schema()).unwrap(),
+ options: SortOptions {
+ descending: false,
+ nulls_first: false,
+ },
+ }];
+
+ // Setup in memory stream
+ let schema = batch[0].schema();
+ let projection = None;
+ let input = Arc::new(MemoryExec::try_new(&[batch], schema, projection).unwrap());
+
+ // Create and run the deduplicator
+ let exec = Arc::new(iox_query::provider::DeduplicateExec::new(input, sort_keys));
+ let got = test_collect(Arc::clone(&exec) as Arc<dyn ExecutionPlan>).await;
+
+ assert_batches_eq!(expect, &*got);
+ }
- // verify data
- assert_eq!(
- p.data.buffer.as_ref().unwrap().min_sequence_number,
- SequenceNumber::new(1)
+ let mut p = PartitionData::new(
+ PARTITION_ID,
+ PARTITION_KEY.clone(),
+ ShardId::new(2),
+ NamespaceId::new(3),
+ TableId::new(4),
+ TABLE_NAME.clone(),
+ SortKeyState::Provided(None),
+ None,
);
+
+ // Perform the initial write.
+ //
+ // In the next series of writes this test will overwrite the value of x
+ // and assert the deduped resulting state.
+ let mb = lp_to_mutable_batch(r#"bananas x=1 42"#).1;
+ p.buffer_write(mb, SequenceNumber::new(1))
+ .expect("write should succeed");
+
+ assert_eq!(p.get_query_data().unwrap().record_batches().len(), 1);
+ assert_deduped(
+ &[
+ "+--------------------------------+---+",
+ "| time | x |",
+ "+--------------------------------+---+",
+ "| 1970-01-01T00:00:00.000000042Z | 1 |",
+ "+--------------------------------+---+",
+ ],
+ p.get_query_data().unwrap(),
+ )
+ .await;
+
+ // Write an update
+ let mb = lp_to_mutable_batch(r#"bananas x=2 42"#).1;
+ p.buffer_write(mb, SequenceNumber::new(2))
+ .expect("write should succeed");
+
+ assert_eq!(p.get_query_data().unwrap().record_batches().len(), 1);
+ assert_deduped(
+ &[
+ "+--------------------------------+---+",
+ "| time | x |",
+ "+--------------------------------+---+",
+ "| 1970-01-01T00:00:00.000000042Z | 2 |",
+ "+--------------------------------+---+",
+ ],
+ p.get_query_data().unwrap(),
+ )
+ .await;
+
+ // Begin persisting the data, moving the buffer to the persisting state.
+ {
+ let batches = p.mark_persisting().unwrap();
+ assert_eq!(batches.record_batches().len(), 1);
+ assert_deduped(
+ &[
+ "+--------------------------------+---+",
+ "| time | x |",
+ "+--------------------------------+---+",
+ "| 1970-01-01T00:00:00.000000042Z | 2 |",
+ "+--------------------------------+---+",
+ ],
+ batches,
+ )
+ .await;
+ }
+
+ // Buffer another write, and generate a snapshot by querying it.
+ let mb = lp_to_mutable_batch(r#"bananas x=3 42"#).1;
+ p.buffer_write(mb, SequenceNumber::new(3))
+ .expect("write should succeed");
+
+ assert_eq!(p.get_query_data().unwrap().record_batches().len(), 2);
+ assert_deduped(
+ &[
+ "+--------------------------------+---+",
+ "| time | x |",
+ "+--------------------------------+---+",
+ "| 1970-01-01T00:00:00.000000042Z | 3 |",
+ "+--------------------------------+---+",
+ ],
+ p.get_query_data().unwrap(),
+ )
+ .await;
+
+ // Finish persisting.
+ p.mark_persisted(SequenceNumber::new(2));
assert_eq!(
- p.data.buffer.as_ref().unwrap().max_sequence_number,
- SequenceNumber::new(5)
+ p.max_persisted_sequence_number(),
+ Some(SequenceNumber::new(2))
);
- assert_eq!(p.data.snapshots.len(), 0);
- assert_eq!(p.data.persisting, None);
- assert!(p.data.buffer.is_some());
-
- // ------------------------------------------
- // Persisting
- let p_batch = p.snapshot_to_persisting_batch().unwrap();
-
- // verify data
- assert!(p.data.buffer.is_none()); // always empty after issuing persit
- assert_eq!(p.data.snapshots.len(), 0); // always empty after issuing persit
- assert_eq!(p.data.persisting, Some(Arc::clone(&p_batch)));
-
- // verify data
- assert!(p.data.buffer.is_none());
- assert_eq!(p.data.snapshots.len(), 0); // no snpashots becasue buffer has not data yet and the
- // snapshot was empty too
- assert_eq!(p.data.persisting, Some(Arc::clone(&p_batch)));
-
- // ------------------------------------------
- // Fill `buffer`
- // --- seq_num: 8
- let (_, mb) = lp_to_mutable_batch(
- r#"
- restaurant,city=Wilmington day="sun",temp=55 35
- restaurant,city=Boston day="sun",temp=60 36
- restaurant,city=Boston day="sun",temp=62 38
- "#,
- );
- p.buffer_write(SequenceNumber::new(8), mb).unwrap();
- // verify data
- assert_eq!(
- p.data.buffer.as_ref().unwrap().min_sequence_number,
- SequenceNumber::new(8)
- ); // 1 newly added mutable batch of 3 rows of data
- assert_eq!(p.data.snapshots.len(), 0); // still empty
- assert_eq!(p.data.persisting, Some(Arc::clone(&p_batch)));
-
- // ------------------------------------------
- // Take snapshot of the `buffer`
- p.snapshot().unwrap();
- // verify data
- assert!(p.data.buffer.is_none()); // empty after snapshot
- assert_eq!(p.data.snapshots.len(), 1); // data moved from buffer
- assert_eq!(p.data.persisting, Some(Arc::clone(&p_batch)));
- // snapshot has three rows moved from buffer
- let data = (*p.data.snapshots[0].data).clone();
- let expected = vec![
- "+------------+-----+------+--------------------------------+",
- "| city | day | temp | time |",
- "+------------+-----+------+--------------------------------+",
- "| Wilmington | sun | 55 | 1970-01-01T00:00:00.000000035Z |",
- "| Boston | sun | 60 | 1970-01-01T00:00:00.000000036Z |",
- "| Boston | sun | 62 | 1970-01-01T00:00:00.000000038Z |",
- "+------------+-----+------+--------------------------------+",
- ];
- assert_batches_sorted_eq!(&expected, &[data]);
- assert_eq!(p.data.snapshots[0].min_sequence_number.get(), 8);
- assert_eq!(p.data.snapshots[0].max_sequence_number.get(), 8);
+ // And assert the correct value remains.
+ assert_eq!(p.get_query_data().unwrap().record_batches().len(), 1);
+ assert_deduped(
+ &[
+ "+--------------------------------+---+",
+ "| time | x |",
+ "+--------------------------------+---+",
+ "| 1970-01-01T00:00:00.000000042Z | 3 |",
+ "+--------------------------------+---+",
+ ],
+ p.get_query_data().unwrap(),
+ )
+ .await;
}
+ // Ensure an updated sort key is returned.
#[tokio::test]
async fn test_update_provided_sort_key() {
let starting_state =
@@ -545,6 +899,7 @@ mod tests {
assert_eq!(p.sort_key().get().await, want);
}
+ // Test loading a deferred sort key from the catalog on demand.
#[tokio::test]
async fn test_update_deferred_sort_key() {
let metrics = Arc::new(metric::Registry::default());
@@ -600,4 +955,243 @@ mod tests {
assert_matches!(p.sort_key(), SortKeyState::Provided(_));
assert_eq!(p.sort_key().get().await, want);
}
+
+ // Perform writes with non-monotonic sequence numbers.
+ #[tokio::test]
+ #[should_panic(expected = "monotonicity violation")]
+ async fn test_non_monotonic_writes() {
+ let mut p = PartitionData::new(
+ PARTITION_ID,
+ PARTITION_KEY.clone(),
+ ShardId::new(2),
+ NamespaceId::new(3),
+ TableId::new(4),
+ TABLE_NAME.clone(),
+ SortKeyState::Provided(None),
+ None,
+ );
+
+ // Perform out of order writes.
+ let mb = lp_to_mutable_batch(r#"bananas,city=London people=2,pigeons="millions" 10"#).1;
+ p.buffer_write(mb.clone(), SequenceNumber::new(2))
+ .expect("write should succeed");
+ let _ = p.buffer_write(mb, SequenceNumber::new(1));
+ }
+
+ #[tokio::test]
+ #[should_panic(expected = "must be a persisting batch when marking complete")]
+ async fn test_mark_persisted_not_persisting() {
+ let mut p = PartitionData::new(
+ PARTITION_ID,
+ PARTITION_KEY.clone(),
+ ShardId::new(2),
+ NamespaceId::new(3),
+ TableId::new(4),
+ TABLE_NAME.clone(),
+ SortKeyState::Provided(None),
+ None,
+ );
+
+ p.mark_persisted(SequenceNumber::new(1));
+ }
+
+ #[tokio::test]
+ async fn test_mark_persisting_no_data() {
+ let mut p = PartitionData::new(
+ PARTITION_ID,
+ PARTITION_KEY.clone(),
+ ShardId::new(2),
+ NamespaceId::new(3),
+ TableId::new(4),
+ TABLE_NAME.clone(),
+ SortKeyState::Provided(None),
+ None,
+ );
+
+ assert!(p.mark_persisting().is_none());
+ }
+
+ #[tokio::test]
+ #[should_panic(expected = "starting persistence on partition in persisting state")]
+ async fn test_mark_persisting_twice() {
+ let mut p = PartitionData::new(
+ PARTITION_ID,
+ PARTITION_KEY.clone(),
+ ShardId::new(2),
+ NamespaceId::new(3),
+ TableId::new(4),
+ TABLE_NAME.clone(),
+ SortKeyState::Provided(None),
+ None,
+ );
+
+ let mb = lp_to_mutable_batch(r#"bananas,city=London people=2,pigeons="millions" 10"#).1;
+ p.buffer_write(mb, SequenceNumber::new(2))
+ .expect("write should succeed");
+
+ assert!(p.mark_persisting().is_some());
+
+ p.mark_persisting();
+ }
+
+ #[tokio::test]
+ #[should_panic(
+ expected = "marking SequenceNumber(42) as persisted but persisting batch max is SequenceNumber(2)"
+ )]
+ async fn test_mark_persisted_wrong_sequence_number() {
+ let mut p = PartitionData::new(
+ PARTITION_ID,
+ PARTITION_KEY.clone(),
+ ShardId::new(2),
+ NamespaceId::new(3),
+ TableId::new(4),
+ TABLE_NAME.clone(),
+ SortKeyState::Provided(None),
+ None,
+ );
+
+ let mb = lp_to_mutable_batch(r#"bananas,city=London people=2,pigeons="millions" 10"#).1;
+ p.buffer_write(mb, SequenceNumber::new(2))
+ .expect("write should succeed");
+
+ assert!(p.mark_persisting().is_some());
+
+ p.mark_persisted(SequenceNumber::new(42));
+ }
+
+ // Because persisting moves the data out of the "hot" buffer, the sequence
+ // numbers are not validated as being monotonic (the new buffer has no
+ // sequence numbers to compare against).
+ //
+ // Instead this check is performed when marking the persist op as complete.
+ #[tokio::test]
+ #[should_panic(expected = "monotonicity violation")]
+ async fn test_non_monotonic_writes_with_persistence() {
+ let mut p = PartitionData::new(
+ PARTITION_ID,
+ PARTITION_KEY.clone(),
+ ShardId::new(2),
+ NamespaceId::new(3),
+ TableId::new(4),
+ TABLE_NAME.clone(),
+ SortKeyState::Provided(None),
+ None,
+ );
+
+ let mb = lp_to_mutable_batch(r#"bananas,city=London people=2,pigeons="millions" 10"#).1;
+ p.buffer_write(mb.clone(), SequenceNumber::new(42))
+ .expect("write should succeed");
+
+ assert!(p.mark_persisting().is_some());
+
+ // This succeeds due to a new buffer being in place that cannot track
+ // previous sequence numbers.
+ p.buffer_write(mb, SequenceNumber::new(1))
+ .expect("out of order write should succeed");
+
+ // The assert on non-monotonic writes moves to here instead.
+ p.mark_persisted(SequenceNumber::new(42));
+ }
+
+ // As above, the sequence numbers are not tracked between buffer instances.
+ //
+ // This ensures that a write after a batch is persisted is still required to
+ // be monotonic.
+ #[tokio::test]
+ #[should_panic(expected = "monotonicity violation")]
+ async fn test_non_monotonic_writes_after_persistence() {
+ let mut p = PartitionData::new(
+ PARTITION_ID,
+ PARTITION_KEY.clone(),
+ ShardId::new(2),
+ NamespaceId::new(3),
+ TableId::new(4),
+ TABLE_NAME.clone(),
+ SortKeyState::Provided(None),
+ None,
+ );
+
+ let mb = lp_to_mutable_batch(r#"bananas,city=London people=2,pigeons="millions" 10"#).1;
+ p.buffer_write(mb.clone(), SequenceNumber::new(42))
+ .expect("write should succeed");
+
+ assert!(p.mark_persisting().is_some());
+ p.mark_persisted(SequenceNumber::new(42));
+
+ // This should fail as the write "goes backwards".
+ p.buffer_write(mb, SequenceNumber::new(1))
+ .expect("out of order write should succeed");
+ }
+
+ // As above, but with a pre-configured persist marker.
+ #[tokio::test]
+ #[should_panic(expected = "monotonicity violation")]
+ async fn test_non_monotonic_writes_persist_marker() {
+ let mut p = PartitionData::new(
+ PARTITION_ID,
+ PARTITION_KEY.clone(),
+ ShardId::new(2),
+ NamespaceId::new(3),
+ TableId::new(4),
+ TABLE_NAME.clone(),
+ SortKeyState::Provided(None),
+ Some(SequenceNumber::new(42)),
+ );
+ assert_eq!(
+ p.max_persisted_sequence_number(),
+ Some(SequenceNumber::new(42))
+ );
+
+ let mb = lp_to_mutable_batch(r#"bananas,city=London people=2,pigeons="millions" 10"#).1;
+
+ // This should fail as the write "goes backwards".
+ p.buffer_write(mb, SequenceNumber::new(1))
+ .expect("out of order write should succeed");
+ }
+
+ // Restoring a persist marker is included in progress reports.
+ #[tokio::test]
+ async fn test_persist_marker_progress() {
+ let p = PartitionData::new(
+ PARTITION_ID,
+ PARTITION_KEY.clone(),
+ ShardId::new(2),
+ NamespaceId::new(3),
+ TableId::new(4),
+ TABLE_NAME.clone(),
+ SortKeyState::Provided(None),
+ Some(SequenceNumber::new(42)),
+ );
+ assert_eq!(
+ p.max_persisted_sequence_number(),
+ Some(SequenceNumber::new(42))
+ );
+
+ // Sequence number ranges cover buffered data only.
+ assert!(p.sequence_number_range().inclusive_min().is_none());
+ assert!(p.sequence_number_range().inclusive_max().is_none());
+
+ // Progress API returns that the op is persisted and readable (not on
+ // the ingester, but via object storage)
+ assert!(p.progress().readable(SequenceNumber::new(42)));
+ assert!(p.progress().persisted(SequenceNumber::new(42)));
+ }
+
+ // Ensure an empty PartitionData does not panic due to constructing an empty
+ // QueryAdaptor.
+ #[test]
+ fn test_empty_partition_no_queryadaptor_panic() {
+ let mut p = PartitionData::new(
+ PARTITION_ID,
+ PARTITION_KEY.clone(),
+ ShardId::new(2),
+ NamespaceId::new(3),
+ TableId::new(4),
+ TABLE_NAME.clone(),
+ SortKeyState::Provided(None),
+ Some(SequenceNumber::new(42)),
+ );
+
+ assert!(p.get_query_data().is_none());
+ }
}
diff --git a/ingester/src/data/partition/buffer.rs b/ingester/src/data/partition/buffer.rs
index 866e7a966c..00e6f376ba 100644
--- a/ingester/src/data/partition/buffer.rs
+++ b/ingester/src/data/partition/buffer.rs
@@ -1,274 +1,112 @@
-//! Data for the lifecycle of the Ingester
-
use std::sync::Arc;
-use data_types::{PartitionId, SequenceNumber, ShardId, TableId};
+use arrow::record_batch::RecordBatch;
+use data_types::SequenceNumber;
use mutable_batch::MutableBatch;
-use schema::selection::Selection;
-use snafu::ResultExt;
-use uuid::Uuid;
-use write_summary::ShardProgress;
-
-use crate::data::table::TableName;
-use super::{PersistingBatch, QueryableBatch, SnapshotBatch};
+use crate::data::SequenceNumberRange;
-/// Data of an IOx partition split into batches
-/// ┌────────────────────────┐ ┌────────────────────────┐ ┌─────────────────────────┐
-/// │ Buffer │ │ Snapshots │ │ Persisting │
-/// │ ┌───────────────────┐ │ │ │ │ │
-/// │ │ ┌───────────────┐│ │ │ ┌───────────────────┐ │ │ ┌───────────────────┐ │
-/// │ │ ┌┴──────────────┐│├─┼────────┼─┼─▶┌───────────────┐│ │ │ │ ┌───────────────┐│ │
-/// │ │┌┴──────────────┐├┘│ │ │ │ ┌┴──────────────┐││ │ │ │ ┌┴──────────────┐││ │
-/// │ ││ BufferBatch ├┘ │ │ │ │┌┴──────────────┐├┘│──┼──────┼─▶│┌┴──────────────┐├┘│ │
-/// │ │└───────────────┘ │ │ ┌───┼─▶│ SnapshotBatch ├┘ │ │ │ ││ SnapshotBatch ├┘ │ │
-/// │ └───────────────────┘ │ │ │ │└───────────────┘ │ │ │ │└───────────────┘ │ │
-/// │ ... │ │ │ └───────────────────┘ │ │ └───────────────────┘ │
-/// │ ┌───────────────────┐ │ │ │ │ │ │
-/// │ │ ┌───────────────┐│ │ │ │ ... │ │ ... │
-/// │ │ ┌┴──────────────┐││ │ │ │ │ │ │
-/// │ │┌┴──────────────┐├┘│─┼────┘ │ ┌───────────────────┐ │ │ ┌───────────────────┐ │
-/// │ ││ BufferBatch ├┘ │ │ │ │ ┌───────────────┐│ │ │ │ ┌───────────────┐│ │
-/// │ │└───────────────┘ │ │ │ │ ┌┴──────────────┐││ │ │ │ ┌┴──────────────┐││ │
-/// │ └───────────────────┘ │ │ │┌┴──────────────┐├┘│──┼──────┼─▶│┌┴──────────────┐├┘│ │
-/// │ │ │ ││ SnapshotBatch ├┘ │ │ │ ││ SnapshotBatch ├┘ │ │
-/// │ ... │ │ │└───────────────┘ │ │ │ │└───────────────┘ │ │
-/// │ │ │ └───────────────────┘ │ │ └───────────────────┘ │
-/// └────────────────────────┘ └────────────────────────┘ └─────────────────────────┘
-#[derive(Debug, Default)]
-pub(crate) struct DataBuffer {
- /// Buffer of incoming writes
- pub(crate) buffer: Option<BufferBatch>,
+mod always_some;
+mod mutable_buffer;
+mod state_machine;
+pub(crate) mod traits;
- /// Data in `buffer` will be moved to a `snapshot` when one of these happens:
- /// . A background persist is called
- /// . A read request from Querier
- /// The `buffer` will be empty when this happens.
- pub(crate) snapshots: Vec<Arc<SnapshotBatch>>,
- /// When a persist is called, data in `buffer` will be moved to a `snapshot`
- /// and then all `snapshots` will be moved to a `persisting`.
- /// Both `buffer` and 'snaphots` will be empty when this happens.
- pub(crate) persisting: Option<Arc<PersistingBatch>>,
- // Extra Notes:
- // . In MVP, we will only persist a set of snapshots at a time.
- // In later version, multiple persisting operations may be happening concurrently but
- // their persisted info must be added into the Catalog in their data
- // ingesting order.
- // . When a read request comes from a Querier, all data from `snapshots`
- // and `persisting` must be sent to the Querier.
- // . After the `persisting` data is persisted and successfully added
- // into the Catalog, it will be removed from this Data Buffer.
- // This data might be added into an extra cache to serve up to
- // Queriers that may not have loaded the parquet files from object
- // storage yet. But this will be decided after MVP.
-}
-
-impl DataBuffer {
- /// If a [`BufferBatch`] exists, convert it to a [`SnapshotBatch`] and add
- /// it to the list of snapshots.
- ///
- /// Does nothing if there is no [`BufferBatch`].
- pub(crate) fn generate_snapshot(&mut self) -> Result<(), mutable_batch::Error> {
- let snapshot = self.copy_buffer_to_snapshot()?;
- if let Some(snapshot) = snapshot {
- self.snapshots.push(snapshot);
- self.buffer = None;
- }
+pub(crate) use state_machine::*;
- Ok(())
- }
+use self::{always_some::AlwaysSome, traits::Queryable};
- /// Returns snapshot of the buffer but keeps data in the buffer
- fn copy_buffer_to_snapshot(&self) -> Result<Option<Arc<SnapshotBatch>>, mutable_batch::Error> {
- if let Some(buf) = &self.buffer {
- return Ok(Some(Arc::new(SnapshotBatch {
- min_sequence_number: buf.min_sequence_number,
- max_sequence_number: buf.max_sequence_number,
- data: Arc::new(buf.data.to_arrow(Selection::All)?),
- })));
- }
+/// The current state of the [`BufferState`] state machine.
+///
+/// NOTE that this does NOT contain the [`Persisting`] state, as this is an
+/// immutable, terminal state that does not accept further writes and is
+/// directly queryable.
+#[derive(Debug)]
+#[must_use = "FSM should not be dropped unused"]
+enum FsmState {
+ /// The data buffer contains no data snapshots, and is accepting writes.
+ Buffering(BufferState<Buffering>),
+}
- Ok(None)
+impl Default for FsmState {
+ fn default() -> Self {
+ Self::Buffering(BufferState::new())
}
+}
- /// Snapshots the buffer and make a QueryableBatch for all the snapshots
- /// Both buffer and snapshots will be empty after this
- pub(super) fn snapshot_to_queryable_batch(
- &mut self,
- table_name: &TableName,
- partition_id: PartitionId,
- ) -> Option<QueryableBatch> {
- self.generate_snapshot()
- .expect("This mutable batch snapshot error should be impossible.");
-
- let mut data = vec![];
- std::mem::swap(&mut data, &mut self.snapshots);
-
- // only produce batch if there is any data
- if data.is_empty() {
- None
- } else {
- Some(QueryableBatch::new(table_name.clone(), partition_id, data))
+impl FsmState {
+ /// Return the current range of writes in the [`BufferState`] state machine,
+ /// if any.
+ pub(crate) fn sequence_number_range(&self) -> &SequenceNumberRange {
+ match self {
+ Self::Buffering(v) => v.sequence_number_range(),
}
}
+}
- /// Returns all existing snapshots plus data in the buffer
- /// This only read data. Data in the buffer will be kept in the buffer
- pub(super) fn buffer_and_snapshots(
- &self,
- ) -> Result<Vec<Arc<SnapshotBatch>>, crate::data::Error> {
- // Existing snapshots
- let mut snapshots = self.snapshots.clone();
-
- // copy the buffer to a snapshot
- let buffer_snapshot = self
- .copy_buffer_to_snapshot()
- .context(crate::data::BufferToSnapshotSnafu)?;
- snapshots.extend(buffer_snapshot);
+/// A helper wrapper over the [`BufferState`] FSM to abstract the caller from
+/// state transitions during reads and writes from the underlying buffer.
+#[derive(Debug, Default)]
+#[must_use = "DataBuffer should not be dropped unused"]
+pub(crate) struct DataBuffer(AlwaysSome<FsmState>);
- Ok(snapshots)
+impl DataBuffer {
+ /// Return the range of [`SequenceNumber`] currently queryable by calling
+ /// [`Self::get_query_data()`].
+ pub(crate) fn sequence_number_range(&self) -> &SequenceNumberRange {
+ self.0.sequence_number_range()
}
- /// Snapshots the buffer and moves snapshots over to the `PersistingBatch`.
+ /// Buffer the given [`MutableBatch`] in memory, ordered by the specified
+ /// [`SequenceNumber`].
///
- /// # Panic
+ /// # Panics
///
- /// Panics if there is already a persisting batch.
- pub(super) fn snapshot_to_persisting(
+ /// This method panics if `sequence_number` is not strictly greater than
+ /// previous calls.
+ pub(crate) fn buffer_write(
&mut self,
- shard_id: ShardId,
- table_id: TableId,
- partition_id: PartitionId,
- table_name: &TableName,
- ) -> Option<Arc<PersistingBatch>> {
- if self.persisting.is_some() {
- panic!("Unable to snapshot while persisting. This is an unexpected state.")
- }
-
- if let Some(queryable_batch) = self.snapshot_to_queryable_batch(table_name, partition_id) {
- let persisting_batch = Arc::new(PersistingBatch {
- shard_id,
- table_id,
- partition_id,
- object_store_id: Uuid::new_v4(),
- data: Arc::new(queryable_batch),
- });
-
- self.persisting = Some(Arc::clone(&persisting_batch));
-
- Some(persisting_batch)
- } else {
- None
- }
- }
-
- /// Return a QueryableBatch of the persisting batch after applying new tombstones
- pub(super) fn get_persisting_data(&self) -> Option<QueryableBatch> {
- let persisting = match &self.persisting {
- Some(p) => p,
- None => return None,
- };
-
- // persisting data
- Some((*persisting.data).clone())
- }
-
- /// Return the progress in this DataBuffer
- pub(super) fn progress(&self) -> ShardProgress {
- let progress = ShardProgress::new();
-
- let progress = if let Some(buffer) = &self.buffer {
- progress.combine(buffer.progress())
- } else {
- progress
- };
-
- let progress = self.snapshots.iter().fold(progress, |progress, snapshot| {
- progress.combine(snapshot.progress())
- });
-
- if let Some(persisting) = &self.persisting {
- persisting
- .data
- .data
- .iter()
- .fold(progress, |progress, snapshot| {
- progress.combine(snapshot.progress())
- })
- } else {
- progress
- }
+ mb: MutableBatch,
+ sequence_number: SequenceNumber,
+ ) -> Result<(), mutable_batch::Error> {
+ // Take ownership of the FSM and apply the write.
+ self.0.mutate(|fsm| match fsm {
+ // Mutable states simply have the write applied.
+ FsmState::Buffering(mut b) => {
+ let ret = b.write(mb, sequence_number);
+ (FsmState::Buffering(b), ret)
+ }
+ })
}
- #[cfg(test)]
- pub(super) fn get_snapshots(&self) -> &[Arc<SnapshotBatch>] {
- self.snapshots.as_ref()
+ /// Return all data for this buffer, ordered by the [`SequenceNumber`] with
+ /// which it was buffered.
+ pub(crate) fn get_query_data(&mut self) -> Vec<Arc<RecordBatch>> {
+ // Take ownership of the FSM and return the data within it.
+ self.0.mutate(|fsm| match fsm {
+ // The buffering state can return data.
+ FsmState::Buffering(b) => {
+ let ret = b.get_query_data();
+ (FsmState::Buffering(b), ret)
+ }
+ })
}
- pub(crate) fn mark_persisted(&mut self) {
- self.persisting = None;
- }
-}
-
-/// BufferBatch is a MutableBatch with its ingesting order, sequence_number, that helps the
-/// ingester keep the batches of data in their ingesting order
-#[derive(Debug)]
-pub(crate) struct BufferBatch {
- /// Sequence number of the first write in this batch
- pub(crate) min_sequence_number: SequenceNumber,
- /// Sequence number of the last write in this batch
- pub(super) max_sequence_number: SequenceNumber,
- /// Ingesting data
- pub(super) data: MutableBatch,
-}
-
-impl BufferBatch {
- /// Return the progress in this DataBuffer
- fn progress(&self) -> ShardProgress {
- ShardProgress::new()
- .with_buffered(self.min_sequence_number)
- .with_buffered(self.max_sequence_number)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use mutable_batch_lp::test_helpers::lp_to_mutable_batch;
-
- use super::*;
-
- #[test]
- fn snapshot_empty_buffer_adds_no_snapshots() {
- let mut data_buffer = DataBuffer::default();
-
- data_buffer.generate_snapshot().unwrap();
-
- assert!(data_buffer.snapshots.is_empty());
- }
-
- #[test]
- fn snapshot_buffer_batch_moves_to_snapshots() {
- let mut data_buffer = DataBuffer::default();
-
- let seq_num1 = SequenceNumber::new(1);
- let (_, mutable_batch1) =
- lp_to_mutable_batch(r#"foo,t1=asdf iv=1i,uv=774u,fv=1.0,bv=true,sv="hi" 1"#);
- let buffer_batch1 = BufferBatch {
- min_sequence_number: seq_num1,
- max_sequence_number: seq_num1,
- data: mutable_batch1,
+ // Deconstruct the [`DataBuffer`] into the underlying FSM in a
+ // [`Persisting`] state, if the buffer contains any data.
+ pub(crate) fn into_persisting(self) -> Option<BufferState<Persisting>> {
+ let p = match self.0.into_inner() {
+ FsmState::Buffering(b) => {
+ // Attempt to snapshot the buffer to an immutable state.
+ match b.snapshot() {
+ Transition::Ok(b) => b.into_persisting(),
+ Transition::Unchanged(_) => {
+ // The buffer contains no data.
+ return None;
+ }
+ }
+ }
};
- let record_batch1 = buffer_batch1.data.to_arrow(Selection::All).unwrap();
- data_buffer.buffer = Some(buffer_batch1);
-
- data_buffer.generate_snapshot().unwrap();
-
- assert!(data_buffer.buffer.is_none());
- assert_eq!(data_buffer.snapshots.len(), 1);
- let snapshot = &data_buffer.snapshots[0];
- assert_eq!(snapshot.min_sequence_number, seq_num1);
- assert_eq!(snapshot.max_sequence_number, seq_num1);
- assert_eq!(&*snapshot.data, &record_batch1);
+ Some(p)
}
}
diff --git a/ingester/src/data/partition/buffer/always_some.rs b/ingester/src/data/partition/buffer/always_some.rs
index 2ae59380e0..ce85e4accd 100644
--- a/ingester/src/data/partition/buffer/always_some.rs
+++ b/ingester/src/data/partition/buffer/always_some.rs
@@ -1,31 +1,8 @@
//! A helper type that ensures an `Option` is always `Some` once the guard is
//! dropped.
-/// A guard through which a value can be placed back into the [`AlwaysSome`].
-#[derive(Debug)]
-#[must_use = "Guard must be used to restore the value"]
-pub(super) struct Guard<'a, T>(&'a mut Option<T>);
-
-impl<'a, T> Guard<'a, T> {
- /// Store `value` in the [`AlwaysSome`] for subsequent
- /// [`AlwaysSome::take()`] calls.
- pub(super) fn store(self, value: T) {
- assert!(self.0.is_none());
- *self.0 = Some(value);
- }
-}
-
-/// A helper type that aims to ease working with an [`Option`] that must always
-/// be restored in a given scope.
-///
-/// Accessing the value within an [`AlwaysSome`] returns a [`Guard`], which MUST
-/// be used to store the value before going out of scope. Failure to store a
-/// value cause a subsequent [`Self::take()`] call to panic.
-///
-/// Failing to store a value in the [`Guard`] causes a compiler warning, however
-/// this does not prevent failing to return a value to the [`AlwaysSome`] as the
-/// warning can be falsely silenced by using it within one conditional code path
-/// and not the other.
+/// A helper type that aims to ease calling methods on a type that takes `self`,
+/// that must always be restored at the end of the method call.
#[derive(Debug)]
pub(super) struct AlwaysSome<T>(Option<T>);
@@ -52,14 +29,14 @@ impl<T> AlwaysSome<T> {
Self(Some(value))
}
- /// Read the value.
- pub(super) fn take(&mut self) -> (Guard<'_, T>, T) {
+ pub(super) fn mutate<F, R>(&mut self, f: F) -> R
+ where
+ F: FnOnce(T) -> (T, R),
+ {
let value = std::mem::take(&mut self.0);
-
- (
- Guard(&mut self.0),
- value.expect("AlwaysSome value is None!"),
- )
+ let (value, ret) = f(value.expect("AlwaysSome value is None!"));
+ self.0 = Some(value);
+ ret
}
/// Deconstruct `self`, returning the inner value.
@@ -76,24 +53,18 @@ mod tests {
fn test_always_some() {
let mut a = AlwaysSome::<usize>::default();
- let (guard, value) = a.take();
- assert_eq!(value, 0);
- guard.store(42);
-
- let (guard, value) = a.take();
- assert_eq!(value, 42);
- guard.store(24);
+ let ret = a.mutate(|value| {
+ assert_eq!(value, 0);
+ (42, true)
+ });
+ assert!(ret);
- assert_eq!(a.into_inner(), 24);
- }
+ let ret = a.mutate(|value| {
+ assert_eq!(value, 42);
+ (13, "bananas")
+ });
+ assert_eq!(ret, "bananas");
- #[test]
- #[should_panic = "AlwaysSome value is None!"]
- fn test_drops_guard() {
- let mut a = AlwaysSome::<usize>::default();
- {
- let _ = a.take();
- }
- let _ = a.take();
+ assert_eq!(a.into_inner(), 13);
}
}
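For reference, a self-contained sketch of the take-mutate-restore pattern that the AlwaysSome helper above implements (illustrative only, not part of the commit; the Slot name and the Vec example are invented here):

#[derive(Debug)]
struct Slot<T>(Option<T>);

impl<T> Slot<T> {
    fn new(value: T) -> Self {
        Self(Some(value))
    }

    /// Hand the owned inner value to `f`, then restore whatever `f` returns.
    fn mutate<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce(T) -> (T, R),
    {
        let value = self.0.take().expect("slot value is None");
        let (value, ret) = f(value);
        self.0 = Some(value);
        ret
    }
}

fn main() {
    let mut slot = Slot::new(vec![1, 2, 3]);
    // The closure receives the value by move and must return it (possibly
    // replaced) together with a result, so the slot is never observed empty.
    let sum: i32 = slot.mutate(|v| {
        let sum = v.iter().sum();
        (v, sum)
    });
    assert_eq!(sum, 6);
}

Because the closure must return a value of the same type, the "forgot to put it back" failure mode of the old Guard-based API cannot compile.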
diff --git a/ingester/src/data/partition/buffer/state_machine.rs b/ingester/src/data/partition/buffer/state_machine.rs
index 278d0384f9..f15e714a04 100644
--- a/ingester/src/data/partition/buffer/state_machine.rs
+++ b/ingester/src/data/partition/buffer/state_machine.rs
@@ -31,7 +31,7 @@ pub(crate) enum Transition<A, B> {
impl<A, B> Transition<A, B> {
/// A helper function to construct [`Self::Ok`] variants.
- pub(super) fn ok(v: A, sequence_range: SequenceNumberRange) -> Transition<A, B> {
+ pub(super) fn ok(v: A, sequence_range: SequenceNumberRange) -> Self {
Self::Ok(BufferState {
state: v,
sequence_range,
@@ -39,7 +39,7 @@ impl<A, B> Transition<A, B> {
}
/// A helper function to construct [`Self::Unchanged`] variants.
- pub(super) fn unchanged(v: BufferState<B>) -> Transition<A, B> {
+ pub(super) fn unchanged(v: BufferState<B>) -> Self {
Self::Unchanged(v)
}
}
@@ -164,7 +164,7 @@ mod tests {
// Keep the data to validate they are ref-counted copies after further
// writes below. Note this construct allows the caller to decide when/if
// to allocate.
- let w1_data = buffer.get_query_data().to_owned();
+ let w1_data = buffer.get_query_data();
let expected = vec![
"+-------+----------+----------+--------------------------------+",
@@ -193,7 +193,7 @@ mod tests {
};
// Verify the writes are still queryable.
- let w2_data = buffer.get_query_data().to_owned();
+ let w2_data = buffer.get_query_data();
let expected = vec![
"+-------+----------+----------+--------------------------------+",
"| great | how_much | tag | time |",
@@ -214,7 +214,7 @@ mod tests {
let same_arcs = w2_data
.iter()
.zip(second_read.iter())
- .all(|(a, b)| Arc::ptr_eq(a, &b));
+ .all(|(a, b)| Arc::ptr_eq(a, b));
assert!(same_arcs);
}
diff --git a/ingester/src/data/partition/resolver/catalog.rs b/ingester/src/data/partition/resolver/catalog.rs
index a7189e632b..ef34b6e681 100644
--- a/ingester/src/data/partition/resolver/catalog.rs
+++ b/ingester/src/data/partition/resolver/catalog.rs
@@ -157,7 +157,7 @@ mod tests {
.repositories()
.await
.partitions()
- .get_by_id(got.id)
+ .get_by_id(got.partition_id)
.await
.unwrap()
.expect("partition not created");
diff --git a/ingester/src/data/sequence_range.rs b/ingester/src/data/sequence_range.rs
index d8e2bb2033..42c4a15baa 100644
--- a/ingester/src/data/sequence_range.rs
+++ b/ingester/src/data/sequence_range.rs
@@ -39,7 +39,7 @@ impl SequenceNumberRange {
let merged_range = self
.range
.into_iter()
- .chain(other.range.clone())
+ .chain(other.range)
.reduce(|a, b| (a.0.min(b.0), a.1.max(b.1)));
Self {
diff --git a/ingester/src/data/table.rs b/ingester/src/data/table.rs
index 3e0fd0d6c4..e314dc3821 100644
--- a/ingester/src/data/table.rs
+++ b/ingester/src/data/table.rs
@@ -7,8 +7,11 @@ use mutable_batch::MutableBatch;
use observability_deps::tracing::*;
use write_summary::ShardProgress;
-use super::partition::{resolver::PartitionProvider, PartitionData, UnpersistedPartitionData};
-use crate::{data::DmlApplyAction, lifecycle::LifecycleHandle, querier_handler::PartitionStatus};
+use super::{
+ partition::{resolver::PartitionProvider, PartitionData},
+ DmlApplyAction,
+};
+use crate::lifecycle::LifecycleHandle;
/// A double-referenced map where [`PartitionData`] can be looked up by
/// [`PartitionKey`], or ID.
@@ -72,6 +75,12 @@ impl std::ops::Deref for TableName {
}
}
+impl PartialEq<str> for TableName {
+ fn eq(&self, other: &str) -> bool {
+ &*self.0 == other
+ }
+}
+
/// Data of a Table in a given Namesapce that belongs to a given Shard
#[derive(Debug)]
pub(crate) struct TableData {
@@ -119,16 +128,6 @@ impl TableData {
}
}
- /// Return parquet_max_sequence_number
- pub(super) fn parquet_max_sequence_number(&self) -> Option<SequenceNumber> {
- self.partition_data
- .by_key
- .values()
- .map(|p| p.max_persisted_sequence_number())
- .max()
- .flatten()
- }
-
// buffers the table write and returns true if the lifecycle manager indicates that
// ingest should be paused.
pub(super) async fn buffer_table_write(
@@ -171,7 +170,7 @@ impl TableData {
let size = batch.size();
let rows = batch.rows();
- partition_data.buffer_write(sequence_number, batch)?;
+ partition_data.buffer_write(batch, sequence_number)?;
// Record the write as having been buffered.
//
@@ -191,6 +190,18 @@ impl TableData {
Ok(DmlApplyAction::Applied(should_pause))
}
+ /// Return a mutable reference to all partitions buffered for this table.
+ ///
+ /// # Ordering
+ ///
+ /// The order of [`PartitionData`] in the iterator is arbitrary and should
+ /// not be relied upon.
+ pub(crate) fn partition_iter_mut(
+ &mut self,
+ ) -> impl Iterator<Item = &mut PartitionData> + ExactSizeIterator {
+ self.partition_data.by_key.values_mut()
+ }
+
/// Return the [`PartitionData`] for the specified ID.
#[allow(unused)]
pub(crate) fn get_partition(
@@ -209,43 +220,12 @@ impl TableData {
self.partition_data.by_key(partition_key)
}
- /// Return the [`PartitionData`] for the specified partition key.
- pub(crate) fn get_partition_by_key_mut(
- &mut self,
- partition_key: &PartitionKey,
- ) -> Option<&mut PartitionData> {
- self.partition_data.by_key_mut(partition_key)
- }
-
- pub(crate) fn unpersisted_partition_data(&self) -> Vec<UnpersistedPartitionData> {
- self.partition_data
- .by_key
- .values()
- .map(|p| UnpersistedPartitionData {
- partition_id: p.partition_id(),
- non_persisted: p
- .get_non_persisting_data()
- .expect("get_non_persisting should always work"),
- persisting: p.get_persisting_data(),
- partition_status: PartitionStatus {
- parquet_max_sequence_number: p.max_persisted_sequence_number(),
- },
- })
- .collect()
- }
-
/// Return progress from this Table
pub(super) fn progress(&self) -> ShardProgress {
- let progress = ShardProgress::new();
- let progress = match self.parquet_max_sequence_number() {
- Some(n) => progress.with_persisted(n),
- None => progress,
- };
-
self.partition_data
.by_key
.values()
- .fold(progress, |progress, partition_data| {
+ .fold(Default::default(), |progress, partition_data| {
progress.combine(partition_data.progress())
})
}
@@ -259,6 +239,16 @@ impl TableData {
pub(crate) fn table_name(&self) -> &TableName {
&self.table_name
}
+
+ /// Return the shard ID for this table.
+ pub(crate) fn shard_id(&self) -> ShardId {
+ self.shard_id
+ }
+
+ /// Return the [`NamespaceId`] this table is a part of.
+ pub fn namespace_id(&self) -> NamespaceId {
+ self.namespace_id
+ }
}
#[cfg(test)]
diff --git a/ingester/src/lib.rs b/ingester/src/lib.rs
index f2ebc67cc0..eee2692e26 100644
--- a/ingester/src/lib.rs
+++ b/ingester/src/lib.rs
@@ -24,7 +24,7 @@ mod job;
pub mod lifecycle;
mod poison;
pub mod querier_handler;
-pub(crate) mod query;
+pub(crate) mod query_adaptor;
pub mod server;
pub(crate) mod stream_handler;
diff --git a/ingester/src/querier_handler.rs b/ingester/src/querier_handler.rs
index 7eaa269289..4851571177 100644
--- a/ingester/src/querier_handler.rs
+++ b/ingester/src/querier_handler.rs
@@ -1,12 +1,7 @@
//! Handle all requests from Querier
-use crate::{
- data::{
- namespace::NamespaceName, partition::UnpersistedPartitionData, table::TableName,
- IngesterData,
- },
- query::QueryableBatch,
-};
+use std::{pin::Pin, sync::Arc};
+
use arrow::{array::new_null_array, error::ArrowError, record_batch::RecordBatch};
use arrow_util::optimize::{optimize_record_batch, optimize_schema};
use data_types::{PartitionId, SequenceNumber};
@@ -17,9 +12,10 @@ use generated_types::ingester::IngesterQueryRequest;
use observability_deps::tracing::debug;
use schema::{merge::SchemaMerger, selection::Selection};
use snafu::{ensure, Snafu};
-use std::{pin::Pin, sync::Arc};
use trace::span::{Span, SpanRecorder};
+use crate::data::{namespace::NamespaceName, table::TableName, IngesterData};
+
/// Number of table data read locks that shall be acquired in parallel
const CONCURRENT_TABLE_DATA_LOCKS: usize = 10;
@@ -264,10 +260,11 @@ pub async fn prepare_data_to_querier(
) -> Result<IngesterQueryResponse> {
debug!(?request, "prepare_data_to_querier");
- let span_recorder = SpanRecorder::new(span);
+ let mut span_recorder = SpanRecorder::new(span);
- let mut tables_data = vec![];
+ let mut table_refs = vec![];
let mut found_namespace = false;
+
for (shard_id, shard_data) in ingest_data.shards() {
debug!(shard_id=%shard_id.get());
let namespace_name = NamespaceName::from(&request.namespace);
@@ -293,7 +290,7 @@ pub async fn prepare_data_to_querier(
}
};
- tables_data.push(table_data);
+ table_refs.push(table_data);
}
ensure!(
@@ -303,113 +300,83 @@ pub async fn prepare_data_to_querier(
},
);
- // acquire locks in parallel
- let unpersisted_partitions: Vec<_> = futures::stream::iter(tables_data)
- .map(|table_data| async move {
- let table_data = table_data.read().await;
- table_data.unpersisted_partition_data()
- })
- // Note: the order doesn't matter
- .buffer_unordered(CONCURRENT_TABLE_DATA_LOCKS)
- .concat()
- .await;
-
ensure!(
- !unpersisted_partitions.is_empty(),
+ !table_refs.is_empty(),
TableNotFoundSnafu {
namespace_name: &request.namespace,
table_name: &request.table
},
);
- let request = Arc::clone(request);
- let partitions =
- futures::stream::iter(unpersisted_partitions.into_iter().map(move |partition| {
- // extract payload
- let partition_id = partition.partition_id;
- let status = partition.partition_status.clone();
- let snapshots: Vec<_> = prepare_data_to_querier_for_partition(
- partition,
- &request,
- span_recorder.child_span("ingester prepare data to querier for partition"),
- )
- .into_iter()
- .map(Ok)
- .collect();
-
- // Note: include partition in `unpersisted_partitions` even when there we might filter
- // out all the data, because the metadata (e.g. max persisted parquet file) is
- // important for the querier.
- Ok(IngesterQueryPartition::new(
- Box::pin(futures::stream::iter(snapshots)),
- partition_id,
- status,
- ))
- }));
-
- Ok(IngesterQueryResponse::new(Box::pin(partitions)))
-}
-
-fn prepare_data_to_querier_for_partition(
- unpersisted_partition_data: UnpersistedPartitionData,
- request: &IngesterQueryRequest,
- span: Option<Span>,
-) -> Vec<SendableRecordBatchStream> {
- let mut span_recorder = SpanRecorder::new(span);
+ // acquire locks and read table data in parallel
+ let unpersisted_partitions: Vec<_> = futures::stream::iter(table_refs)
+ .map(|table_data| async move {
+ let mut table_data = table_data.write().await;
+ table_data
+ .partition_iter_mut()
+ .map(|p| {
+ (
+ p.partition_id(),
+ p.get_query_data(),
+ p.max_persisted_sequence_number(),
+ )
+ })
+ .collect::<Vec<_>>()
+ })
+ // Note: the order doesn't matter
+ .buffer_unordered(CONCURRENT_TABLE_DATA_LOCKS)
+ .concat()
+ .await;
- // ------------------------------------------------
- // Accumulate data
+ let request = Arc::clone(request);
+ let partitions = futures::stream::iter(unpersisted_partitions.into_iter().map(
+ move |(partition_id, data, max_persisted_sequence_number)| {
+ let snapshots = match data {
+ None => Box::pin(futures::stream::empty()) as SnapshotStream,
- // Make Filters
- let selection_columns: Vec<_> = request.columns.iter().map(String::as_str).collect();
- let selection = if selection_columns.is_empty() {
- Selection::All
- } else {
- Selection::Some(&selection_columns)
- };
+ Some(batch) => {
+ assert_eq!(partition_id, batch.partition_id());
- // figure out what batches
- let queryable_batch = unpersisted_partition_data
- .persisting
- .unwrap_or_else(|| {
- QueryableBatch::new(
- request.table.clone().into(),
- unpersisted_partition_data.partition_id,
- vec![],
- )
- })
- .with_data(unpersisted_partition_data.non_persisted);
-
- let streams = queryable_batch
- .data
- .iter()
- .map(|snapshot_batch| {
- let batch = snapshot_batch.data.as_ref();
- let schema = batch.schema();
-
- // Apply selection to in-memory batch
- let batch = match selection {
- Selection::All => batch.clone(),
- Selection::Some(columns) => {
- let projection = columns
+ // Project the data if necessary
+ let columns = request
+ .columns
.iter()
- .flat_map(|&column_name| {
- // ignore non-existing columns
- schema.index_of(column_name).ok()
- })
+ .map(String::as_str)
.collect::<Vec<_>>();
- batch.project(&projection).expect("bug in projection")
+ let selection = if columns.is_empty() {
+ Selection::All
+ } else {
+ Selection::Some(columns.as_ref())
+ };
+
+ let snapshots = batch.project_selection(selection).into_iter().map(|batch| {
+ // Create a stream from the batch.
+ Ok(Box::pin(MemoryStream::new(vec![batch])) as SendableRecordBatchStream)
+ });
+
+ Box::pin(futures::stream::iter(snapshots)) as SnapshotStream
}
};
- // create stream
- Box::pin(MemoryStream::new(vec![batch])) as SendableRecordBatchStream
- })
- .collect();
+ // NOTE: the partition persist watermark MUST always be provided to
+ // the querier for any partition that has performed (or is aware of)
+ // a persist operation.
+ //
+ // This allows the querier to use the per-partition persist marker
+ // when planning queries.
+ Ok(IngesterQueryPartition::new(
+ snapshots,
+ partition_id,
+ PartitionStatus {
+ parquet_max_sequence_number: max_persisted_sequence_number,
+ },
+ ))
+ },
+ ));
span_recorder.ok("done");
- streams
+ Ok(IngesterQueryResponse::new(Box::pin(partitions)))
}
#[cfg(test)]
@@ -427,7 +394,7 @@ mod tests {
use predicate::Predicate;
use super::*;
- use crate::test_util::{make_ingester_data, DataLocation, TEST_NAMESPACE, TEST_TABLE};
+ use crate::test_util::{make_ingester_data, TEST_NAMESPACE, TEST_TABLE};
#[tokio::test]
async fn test_ingester_query_response_flatten() {
@@ -517,23 +484,11 @@ mod tests {
async fn test_prepare_data_to_querier() {
test_helpers::maybe_start_logging();
- let span = None;
-
// make 14 scenarios for ingester data
let mut scenarios = vec![];
for two_partitions in [false, true] {
- for loc in [
- DataLocation::BUFFER,
- DataLocation::BUFFER_SNAPSHOT,
- DataLocation::BUFFER_PERSISTING,
- DataLocation::BUFFER_SNAPSHOT_PERSISTING,
- DataLocation::SNAPSHOT,
- DataLocation::SNAPSHOT_PERSISTING,
- DataLocation::PERSISTING,
- ] {
- let scenario = Arc::new(make_ingester_data(two_partitions, loc).await);
- scenarios.push((loc, scenario));
- }
+ let scenario = Arc::new(make_ingester_data(two_partitions).await);
+ scenarios.push(scenario);
}
// read data from all scenarios without any filters
@@ -557,9 +512,8 @@ mod tests {
"| Wilmington | mon | | 1970-01-01T00:00:00.000000035Z |", // in group 3 - seq_num: 6
"+------------+-----+------+--------------------------------+",
];
- for (loc, scenario) in &scenarios {
- println!("Location: {loc:?}");
- let result = prepare_data_to_querier(scenario, &request, span.clone())
+ for scenario in &scenarios {
+ let result = prepare_data_to_querier(scenario, &request, None)
.await
.unwrap()
.into_record_batches()
@@ -593,9 +547,8 @@ mod tests {
"| Wilmington | | 1970-01-01T00:00:00.000000035Z |",
"+------------+------+--------------------------------+",
];
- for (loc, scenario) in &scenarios {
- println!("Location: {loc:?}");
- let result = prepare_data_to_querier(scenario, &request, span.clone())
+ for scenario in &scenarios {
+ let result = prepare_data_to_querier(scenario, &request, None)
.await
.unwrap()
.into_record_batches()
@@ -638,9 +591,8 @@ mod tests {
"| Wilmington | | 1970-01-01T00:00:00.000000035Z |",
"+------------+------+--------------------------------+",
];
- for (loc, scenario) in &scenarios {
- println!("Location: {loc:?}");
- let result = prepare_data_to_querier(scenario, &request, span.clone())
+ for scenario in &scenarios {
+ let result = prepare_data_to_querier(scenario, &request, None)
.await
.unwrap()
.into_record_batches()
@@ -655,9 +607,8 @@ mod tests {
vec![],
None,
));
- for (loc, scenario) in &scenarios {
- println!("Location: {loc:?}");
- let err = prepare_data_to_querier(scenario, &request, span.clone())
+ for scenario in &scenarios {
+ let err = prepare_data_to_querier(scenario, &request, None)
.await
.unwrap_err();
assert_matches!(err, Error::TableNotFound { .. });
@@ -670,9 +621,8 @@ mod tests {
vec![],
None,
));
- for (loc, scenario) in &scenarios {
- println!("Location: {loc:?}");
- let err = prepare_data_to_querier(scenario, &request, span.clone())
+ for scenario in &scenarios {
+ let err = prepare_data_to_querier(scenario, &request, None)
.await
.unwrap_err();
assert_matches!(err, Error::NamespaceNotFound { .. });
diff --git a/ingester/src/query.rs b/ingester/src/query_adaptor.rs
similarity index 57%
rename from ingester/src/query.rs
rename to ingester/src/query_adaptor.rs
index dc38001e4f..67902ab83f 100644
--- a/ingester/src/query.rs
+++ b/ingester/src/query_adaptor.rs
@@ -1,12 +1,12 @@
-//! Module to handle query on Ingester's data
+//! An adaptor over a set of [`RecordBatch`] allowing them to be used as an IOx
+//! [`QueryChunk`].
use std::{any::Any, sync::Arc};
use arrow::record_batch::RecordBatch;
use arrow_util::util::ensure_schema;
use data_types::{
- ChunkId, ChunkOrder, DeletePredicate, PartitionId, SequenceNumber, TableSummary,
- TimestampMinMax,
+ ChunkId, ChunkOrder, DeletePredicate, PartitionId, TableSummary, TimestampMinMax,
};
use datafusion::{
error::DataFusionError,
@@ -21,11 +21,12 @@ use iox_query::{
QueryChunk, QueryChunkMeta,
};
use observability_deps::tracing::trace;
+use once_cell::sync::OnceCell;
use predicate::Predicate;
use schema::{merge::merge_record_batch_schemas, selection::Selection, sort::SortKey, Schema};
use snafu::{ResultExt, Snafu};
-use crate::data::{partition::SnapshotBatch, table::TableName};
+use crate::data::table::TableName;
#[allow(clippy::enum_variant_names)]
#[derive(Debug, Snafu)]
@@ -47,72 +48,106 @@ pub enum Error {
/// A specialized `Error` for Ingester's Query errors
pub type Result<T, E = Error> = std::result::Result<T, E>;
-/// Queryable data used for both query and persistence
+/// A queryable wrapper over a set of ordered [`RecordBatch`] snapshot from a
+/// single [`PartitionData`].
+///
+/// It is an invariant that a [`QueryAdaptor`] MUST always contain at least one
+/// row. This frees the caller of having to reason about empty [`QueryAdaptor`]
+/// instances yielding empty [`RecordBatch`].
+///
+/// [`PartitionData`]: crate::data::partition::PartitionData
#[derive(Debug, PartialEq, Clone)]
-pub(crate) struct QueryableBatch {
- /// data
- pub(crate) data: Vec<Arc<SnapshotBatch>>,
+pub(crate) struct QueryAdaptor {
+ /// The snapshot data from a partition.
+ ///
+ /// This MUST be non-pub / closed for modification / immutable to support
+ /// interning the merged schema in [`Self::schema()`].
+ data: Vec<Arc<RecordBatch>>,
+
+ /// The name of the table this data is part of.
+ table_name: TableName,
- /// This is needed to return a reference for a trait function
- pub(crate) table_name: TableName,
+    /// The catalog ID of the partition this data is part of.
+ partition_id: PartitionId,
- /// Partition ID
- pub(crate) partition_id: PartitionId,
+ /// An interned schema for all [`RecordBatch`] in data.
+ schema: OnceCell<Arc<Schema>>,
}
-impl QueryableBatch {
- /// Initilaize a QueryableBatch
+impl QueryAdaptor {
+ /// Construct a [`QueryAdaptor`].
+ ///
+ /// # Panics
+ ///
+ /// This constructor panics if `data` contains no [`RecordBatch`], or all
+ /// [`RecordBatch`] are empty.
pub(crate) fn new(
table_name: TableName,
partition_id: PartitionId,
- data: Vec<Arc<SnapshotBatch>>,
+ data: Vec<Arc<RecordBatch>>,
) -> Self {
+ // There must always be at least one record batch and one row.
+ //
+ // This upholds an invariant that simplifies dealing with empty
+ // partitions - if there is a QueryAdaptor, it contains data.
+ assert!(data.iter().map(|b| b.num_rows()).sum::<usize>() > 0);
+
Self {
data,
table_name,
partition_id,
+ schema: OnceCell::default(),
}
}
- /// Add snapshots to this batch
- pub(crate) fn with_data(mut self, mut data: Vec<Arc<SnapshotBatch>>) -> Self {
- self.data.append(&mut data);
- self
+ pub(crate) fn project_selection(&self, selection: Selection<'_>) -> Vec<RecordBatch> {
+ // Project the column selection across all RecordBatch
+ self.data
+ .iter()
+ .map(|data| {
+ let batch = data.as_ref();
+ let schema = batch.schema();
+
+ // Apply selection to in-memory batch
+ match selection {
+ Selection::All => batch.clone(),
+ Selection::Some(columns) => {
+ let projection = columns
+ .iter()
+ .flat_map(|&column_name| {
+ // ignore non-existing columns
+ schema.index_of(column_name).ok()
+ })
+ .collect::<Vec<_>>();
+ batch.project(&projection).expect("bug in projection")
+ }
+ }
+ })
+ .collect()
}
- /// return min and max of all the snapshots
- pub(crate) fn min_max_sequence_numbers(&self) -> (SequenceNumber, SequenceNumber) {
- let min = self
- .data
- .first()
- .expect("The Queryable Batch should not empty")
- .min_sequence_number;
-
- let max = self
- .data
- .first()
- .expect("The Queryable Batch should not empty")
- .max_sequence_number;
-
- assert!(min <= max);
+ /// Returns the [`RecordBatch`] instances in this [`QueryAdaptor`].
+ pub(crate) fn record_batches(&self) -> &[Arc<RecordBatch>] {
+ self.data.as_ref()
+ }
- (min, max)
+ /// Returns the partition ID from which the data this [`QueryAdaptor`] was
+ /// sourced from.
+ pub(crate) fn partition_id(&self) -> PartitionId {
+ self.partition_id
}
}
-impl QueryChunkMeta for QueryableBatch {
+impl QueryChunkMeta for QueryAdaptor {
fn summary(&self) -> Option<Arc<TableSummary>> {
None
}
fn schema(&self) -> Arc<Schema> {
- // TODO: may want store this schema as a field of QueryableBatch and
- // only do this schema merge the first time it is called
-
- // Merge schema of all RecordBatches of the PerstingBatch
- let batches: Vec<Arc<RecordBatch>> =
- self.data.iter().map(|s| Arc::clone(&s.data)).collect();
- merge_record_batch_schemas(&batches)
+ Arc::clone(
+ self.schema
+ .get_or_init(|| merge_record_batch_schemas(&self.data)),
+ )
}
fn partition_sort_key(&self) -> Option<&SortKey> {
@@ -139,11 +174,11 @@ impl QueryChunkMeta for QueryableBatch {
}
}
-impl QueryChunk for QueryableBatch {
+impl QueryChunk for QueryAdaptor {
// This function should not be used in QueryBatch context
fn id(&self) -> ChunkId {
- // To return a value for debugging and make it consistent with ChunkId created in Compactor,
- // use Uuid for this
+ // To return a value for debugging and make it consistent with ChunkId
+ // created in Compactor, use Uuid for this
ChunkId::new()
}
@@ -152,10 +187,11 @@ impl QueryChunk for QueryableBatch {
&self.table_name
}
- /// Returns true if the chunk may contain a duplicate "primary
- /// key" within itself
+ /// Returns true if the chunk may contain a duplicate "primary key" within
+ /// itself
fn may_contain_pk_duplicates(&self) -> bool {
- // always true because they are not deduplicated yet
+ // always true because the rows across record batches have not been
+ // de-duplicated.
true
}
@@ -204,22 +240,15 @@ impl QueryChunk for QueryableBatch {
.context(SchemaSnafu)
.map_err(|e| DataFusionError::External(Box::new(e)))?;
- // Get all record batches from their snapshots
+ // Apply the projection over all the data in self, ensuring each batch
+ // has the specified schema.
let batches = self
- .data
- .iter()
- .filter_map(|snapshot| {
- let batch = snapshot
- // Only return columns in the selection
- .scan(selection)
- .context(FilterColumnsSnafu {})
- .transpose()?
- // ensure batch has desired schema
- .and_then(|batch| {
- ensure_schema(&schema.as_arrow(), &batch).context(ConcatBatchesSnafu {})
- })
- .map(Arc::new);
- Some(batch)
+ .project_selection(selection)
+ .into_iter()
+ .map(|batch| {
+ ensure_schema(&schema.as_arrow(), &batch)
+ .context(ConcatBatchesSnafu {})
+ .map(Arc::new)
})
.collect::<Result<Vec<_>, _>>()
.map_err(|e| DataFusionError::External(Box::new(e)))?;
@@ -233,10 +262,9 @@ impl QueryChunk for QueryableBatch {
/// Returns chunk type
fn chunk_type(&self) -> &str {
- "PersistingBatch"
+ "QueryAdaptor"
}
- // This function should not be used in PersistingBatch context
fn order(&self) -> ChunkOrder {
unimplemented!()
}
diff --git a/ingester/src/test_util.rs b/ingester/src/test_util.rs
index 693d9cbdc9..e58c573e11 100644
--- a/ingester/src/test_util.rs
+++ b/ingester/src/test_util.rs
@@ -7,9 +7,8 @@ use std::{sync::Arc, time::Duration};
use arrow::record_batch::RecordBatch;
use arrow_util::assert_batches_eq;
-use bitflags::bitflags;
use data_types::{
- NamespaceId, PartitionId, PartitionKey, Sequence, SequenceNumber, ShardId, ShardIndex, TableId,
+ NamespaceId, PartitionKey, Sequence, SequenceNumber, ShardId, ShardIndex, TableId,
};
use dml::{DmlMeta, DmlOperation, DmlWrite};
use iox_catalog::{interface::Catalog, mem::MemCatalog};
@@ -17,71 +16,12 @@ use iox_query::test::{raw_data, TestChunk};
use iox_time::{SystemProvider, Time};
use mutable_batch_lp::lines_to_batches;
use object_store::memory::InMemory;
-use uuid::Uuid;
use crate::{
- data::{
- partition::{resolver::CatalogPartitionResolver, PersistingBatch, SnapshotBatch},
- IngesterData,
- },
+ data::{partition::resolver::CatalogPartitionResolver, IngesterData},
lifecycle::{LifecycleConfig, LifecycleManager},
- query::QueryableBatch,
};
-#[allow(clippy::too_many_arguments)]
-pub(crate) fn make_persisting_batch(
- shard_id: i64,
- seq_num_start: i64,
- table_id: i64,
- table_name: &str,
- partition_id: i64,
- object_store_id: Uuid,
- batches: Vec<Arc<RecordBatch>>,
-) -> Arc<PersistingBatch> {
- let queryable_batch = make_queryable_batch(table_name, partition_id, seq_num_start, batches);
- Arc::new(PersistingBatch {
- shard_id: ShardId::new(shard_id),
- table_id: TableId::new(table_id),
- partition_id: PartitionId::new(partition_id),
- object_store_id,
- data: queryable_batch,
- })
-}
-
-pub(crate) fn make_queryable_batch(
- table_name: &str,
- partition_id: i64,
- seq_num_start: i64,
- batches: Vec<Arc<RecordBatch>>,
-) -> Arc<QueryableBatch> {
- // make snapshots for the batches
- let mut snapshots = vec![];
- let mut seq_num = seq_num_start;
- for batch in batches {
- let seq = SequenceNumber::new(seq_num);
- snapshots.push(Arc::new(make_snapshot_batch(batch, seq, seq)));
- seq_num += 1;
- }
-
- Arc::new(QueryableBatch::new(
- table_name.into(),
- PartitionId::new(partition_id),
- snapshots,
- ))
-}
-
-pub(crate) fn make_snapshot_batch(
- batch: Arc<RecordBatch>,
- min: SequenceNumber,
- max: SequenceNumber,
-) -> SnapshotBatch {
- SnapshotBatch {
- min_sequence_number: min,
- max_sequence_number: max,
- data: batch,
- }
-}
-
pub(crate) async fn create_one_row_record_batch_with_influxtype() -> Vec<Arc<RecordBatch>> {
let chunk1 = Arc::new(
TestChunk::new("t")
@@ -506,32 +446,9 @@ pub(crate) const TEST_TABLE: &str = "test_table";
pub(crate) const TEST_PARTITION_1: &str = "test+partition_1";
pub(crate) const TEST_PARTITION_2: &str = "test+partition_2";
-bitflags! {
- /// Make the same in-memory data but data are split between:
- /// . one or two partition
- /// . The first partition will have a choice to have data in either
- /// . buffer only
- /// . snapshot only
- /// . persisting only
- /// . buffer + snapshot
- /// . buffer + persisting
- /// . snapshot + persisting
- /// . buffer + snapshot + persisting
- /// . If the second partittion exists, it only has data in its buffer
- pub(crate) struct DataLocation: u8 {
- const BUFFER = 0b001;
- const SNAPSHOT = 0b010;
- const PERSISTING = 0b100;
- const BUFFER_SNAPSHOT = Self::BUFFER.bits | Self::SNAPSHOT.bits;
- const BUFFER_PERSISTING = Self::BUFFER.bits | Self::PERSISTING.bits;
- const SNAPSHOT_PERSISTING = Self::SNAPSHOT.bits | Self::PERSISTING.bits;
- const BUFFER_SNAPSHOT_PERSISTING = Self::BUFFER.bits | Self::SNAPSHOT.bits | Self::PERSISTING.bits;
- }
-}
-
/// This function produces one scenario but with the parameter combination (2*7),
/// you will be able to produce 14 scenarios by calling it in 2 loops
-pub(crate) async fn make_ingester_data(two_partitions: bool, loc: DataLocation) -> IngesterData {
+pub(crate) async fn make_ingester_data(two_partitions: bool) -> IngesterData {
// Whatever data because they won't be used in the tests
let metrics: Arc<metric::Registry> = Default::default();
let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(Arc::clone(&metrics)));
@@ -576,26 +493,6 @@ pub(crate) async fn make_ingester_data(two_partitions: bool, loc: DataLocation)
.unwrap();
}
- if loc.contains(DataLocation::PERSISTING) {
- // Move partition 1 data to persisting
- let _ignored = ingester
- .shard(shard_id)
- .unwrap()
- .namespace(&TEST_NAMESPACE.into())
- .unwrap()
- .snapshot_to_persisting(&TEST_TABLE.into(), &PartitionKey::from(TEST_PARTITION_1))
- .await;
- } else if loc.contains(DataLocation::SNAPSHOT) {
- // move partition 1 data to snapshot
- let _ignored = ingester
- .shard(shard_id)
- .unwrap()
- .namespace(&TEST_NAMESPACE.into())
- .unwrap()
- .snapshot(&TEST_TABLE.into(), &PartitionKey::from(TEST_PARTITION_1))
- .await;
- }
-
ingester
}
diff --git a/iox_query/src/provider.rs b/iox_query/src/provider.rs
index 8b9912f45e..d649345e6b 100644
--- a/iox_query/src/provider.rs
+++ b/iox_query/src/provider.rs
@@ -40,8 +40,7 @@ mod deduplicate;
pub mod overlap;
mod physical;
use self::overlap::group_potential_duplicates;
-pub(crate) use deduplicate::DeduplicateExec;
-pub use deduplicate::RecordBatchDeduplicator;
+pub use deduplicate::{DeduplicateExec, RecordBatchDeduplicator};
pub(crate) use physical::IOxReadFilterNode;
#[derive(Debug, Snafu)]
|
9c40d80032d8cebf8c6c1b794cdfa054c473b707
|
Dom Dwyer
|
2022-10-13 15:41:48
|
log shard_id in op result
|
Include the shard ID in the op apply result to correlate it with other
log messages.
| null |
refactor(ingester): log shard_id in op result
Include the shard ID in the op apply result to correlate it with other
log messages.
|
diff --git a/ingester/src/handler.rs b/ingester/src/handler.rs
index 981a43cd57..55c070464a 100644
--- a/ingester/src/handler.rs
+++ b/ingester/src/handler.rs
@@ -281,6 +281,7 @@ impl IngestHandlerImpl {
lifecycle_handle,
topic_name,
shard.shard_index,
+ shard.id,
&*metric_registry,
skip_to_oldest_available,
);
diff --git a/ingester/src/stream_handler/handler.rs b/ingester/src/stream_handler/handler.rs
index f592d2016a..68a49d3ff8 100644
--- a/ingester/src/stream_handler/handler.rs
+++ b/ingester/src/stream_handler/handler.rs
@@ -2,7 +2,7 @@
use std::{fmt::Debug, time::Duration};
-use data_types::{SequenceNumber, ShardIndex};
+use data_types::{SequenceNumber, ShardId, ShardIndex};
use dml::DmlOperation;
use futures::{pin_mut, FutureExt, StreamExt};
use iox_time::{SystemProvider, TimeProvider};
@@ -70,6 +70,7 @@ pub(crate) struct SequencedStreamHandler<I, O, T = SystemProvider> {
/// Log context fields - otherwise unused.
topic_name: String,
shard_index: ShardIndex,
+ shard_id: ShardId,
skip_to_oldest_available: bool,
}
@@ -89,6 +90,7 @@ impl<I, O> SequencedStreamHandler<I, O> {
lifecycle_handle: LifecycleHandleImpl,
topic_name: String,
shard_index: ShardIndex,
+ shard_id: ShardId,
metrics: &metric::Registry,
skip_to_oldest_available: bool,
) -> Self {
@@ -169,6 +171,7 @@ impl<I, O> SequencedStreamHandler<I, O> {
shard_reset_count,
topic_name,
shard_index,
+ shard_id,
skip_to_oldest_available,
}
}
@@ -192,6 +195,7 @@ impl<I, O> SequencedStreamHandler<I, O> {
shard_reset_count: self.shard_reset_count,
topic_name: self.topic_name,
shard_index: self.shard_index,
+ shard_id: self.shard_id,
skip_to_oldest_available: self.skip_to_oldest_available,
}
}
@@ -229,6 +233,7 @@ where
info!(
kafka_topic=%self.topic_name,
shard_index=%self.shard_index,
+ shard_id=%self.shard_id,
"stream handler shutdown",
);
return;
@@ -269,6 +274,7 @@ where
error=%e,
kafka_topic=%self.topic_name,
shard_index=%self.shard_index,
+ shard_id=%self.shard_id,
potential_data_loss=true,
"reset stream"
);
@@ -282,6 +288,7 @@ where
error=%e,
kafka_topic=%self.topic_name,
shard_index=%self.shard_index,
+ shard_id=%self.shard_id,
potential_data_loss=true,
"unable to read from desired sequence number offset"
);
@@ -294,6 +301,7 @@ where
error=%e,
kafka_topic=%self.topic_name,
shard_index=%self.shard_index,
+ shard_id=%self.shard_id,
"I/O error reading from shard"
);
tokio::time::sleep(Duration::from_secs(1)).await;
@@ -309,6 +317,7 @@ where
error=%e,
kafka_topic=%self.topic_name,
shard_index=%self.shard_index,
+ shard_id=%self.shard_id,
potential_data_loss=true,
"unable to deserialize dml operation"
);
@@ -331,6 +340,7 @@ something clever.",
error=%e,
kafka_topic=%self.topic_name,
shard_index=%self.shard_index,
+ shard_id=%self.shard_id,
potential_data_loss=true,
"unhandled error converting write buffer data to DmlOperation",
);
@@ -360,6 +370,7 @@ something clever.",
trace!(
kafka_topic=%self.topic_name,
shard_index=%self.shard_index,
+ shard_id=%self.shard_id,
op_size=op.size(),
op_namespace=op.namespace(),
?op_sequence_number,
@@ -377,6 +388,7 @@ something clever.",
trace!(
kafka_topic=%self.topic_name,
shard_index=%self.shard_index,
+ shard_id=%self.shard_id,
%should_pause,
?op_sequence_number,
"successfully applied dml operation"
@@ -388,6 +400,7 @@ something clever.",
error=%e,
kafka_topic=%self.topic_name,
shard_index=%self.shard_index,
+ shard_id=%self.shard_id,
?op_sequence_number,
potential_data_loss=true,
"failed to apply dml operation"
@@ -403,6 +416,7 @@ something clever.",
trace!(
kafka_topic=%self.topic_name,
shard_index=%self.shard_index,
+ shard_id=%self.shard_id,
delta=%delta.as_millis(),
"reporting TTBR for shard (ms)"
);
@@ -423,6 +437,7 @@ something clever.",
warn!(
kafka_topic=%self.topic_name,
shard_index=%self.shard_index,
+ shard_id=%self.shard_id,
"pausing ingest until persistence has run"
);
while !self.lifecycle_handle.can_resume_ingest() {
@@ -448,6 +463,7 @@ something clever.",
info!(
kafka_topic=%self.topic_name,
shard_index=%self.shard_index,
+ shard_id=%self.shard_id,
pause_duration=%duration_str,
"resuming ingest"
);
@@ -654,6 +670,7 @@ mod tests {
lifecycle.handle(),
TEST_TOPIC_NAME.to_string(),
TEST_SHARD_INDEX,
+ ShardId::new(42),
&*metrics,
$skip_to_oldest_available,
).with_time_provider(iox_time::MockProvider::new(*TEST_TIME));
@@ -1020,6 +1037,7 @@ mod tests {
lifecycle.handle(),
"topic_name".to_string(),
ShardIndex::new(42),
+ ShardId::new(24),
&*metrics,
false,
);
@@ -1067,6 +1085,7 @@ mod tests {
lifecycle.handle(),
"topic_name".to_string(),
ShardIndex::new(42),
+ ShardId::new(24),
&*metrics,
false,
);
|
5680710e75ee380d4101cd006e91d2b24d552bb4
|
Dom Dwyer
|
2023-03-20 14:15:30
|
full u64 range in SequenceNumberSet
|
Prior to this commit, the SequenceNumberSet accepted values up to
u32::MAX (approx 4.2 billion), which works out to be ~50 days of
continuous ID allocation at 1k writes/s.
This commit changes the SequenceNumberSet to accept the full range of a
u64, with the caveat that outside of the u32 range, values may be
ordered incorrectly. See:
https://github.com/influxdata/influxdb_iox/issues/7260
Fortunately values from SequenceNumberSet are used as an identity, and
never for ordering. We should remove the PartialOrd bounds on
SequenceNumber once Kafka has been cleaned up to encode this in the type
system.
| null |
fix: full u64 range in SequenceNumberSet
Prior to this commit, the SequenceNumberSet accepted values up to
u32::MAX (approx 4.2 billion) which works out to be ~50 days of
continious ID allocation at 1k writes/s.
This commit changes the SequenceNumberSet to accept the full range of a
u64, with the caveat that outside of the u32 range, values may be
ordered incorrectly. See:
https://github.com/influxdata/influxdb_iox/issues/7260
Fortunately values from SequenceNumberSet are used as an identity, and
never for ordering. We should remove the PartialOrd bounds on
SequenceNumber once Kafka has been cleaned up to encode this in the type
system.
|
diff --git a/data_types/src/sequence_number_set.rs b/data_types/src/sequence_number_set.rs
index a1bca5ee9c..76ed04ebf9 100644
--- a/data_types/src/sequence_number_set.rs
+++ b/data_types/src/sequence_number_set.rs
@@ -1,10 +1,14 @@
//! A set of [`SequenceNumber`] instances.
+use std::collections::BTreeMap;
+
+use croaring::treemap::NativeSerializer;
+
use crate::SequenceNumber;
/// A space-efficient encoded set of [`SequenceNumber`].
#[derive(Debug, Default, Clone, PartialEq)]
-pub struct SequenceNumberSet(croaring::Bitmap);
+pub struct SequenceNumberSet(croaring::Treemap);
impl SequenceNumberSet {
/// Add the specified [`SequenceNumber`] to the set.
@@ -43,7 +47,9 @@ impl SequenceNumberSet {
///
/// [spec]: https://github.com/RoaringBitmap/RoaringFormatSpec/
pub fn to_bytes(&self) -> Vec<u8> {
- self.0.serialize()
+ self.0
+ .serialize()
+ .expect("failed to serialise sequence number set")
}
/// Return true if the specified [`SequenceNumber`] has been added to
@@ -70,7 +76,9 @@ impl SequenceNumberSet {
/// Initialise a [`SequenceNumberSet`] that is pre-allocated to contain up
/// to `n` elements without reallocating.
pub fn with_capacity(n: u32) -> Self {
- Self(croaring::Bitmap::create_with_capacity(n))
+ let mut map = BTreeMap::new();
+ map.insert(0, croaring::Bitmap::create_with_capacity(n));
+ Self(croaring::Treemap { map })
}
}
@@ -79,9 +87,9 @@ impl TryFrom<&[u8]> for SequenceNumberSet {
type Error = String;
fn try_from(buffer: &[u8]) -> Result<Self, Self::Error> {
- croaring::Bitmap::try_deserialize(buffer)
+ croaring::Treemap::deserialize(buffer)
.map(SequenceNumberSet)
- .ok_or_else(|| "invalid bitmap bytes".to_string())
+ .map_err(|e| format!("failed to deserialise sequence number set: {e}"))
}
}
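For context, a minimal sketch of the widened value range (illustrative, not part of the commit; it assumes croaring's Treemap exposes create/add/contains in the same style as the Bitmap API used previously):

use croaring::Treemap;

fn main() {
    let mut set = Treemap::create();

    // Well within the old u32-backed range.
    set.add(42);

    // Beyond u32::MAX (~4.29 billion) - representable only by the Treemap.
    let big = u64::from(u32::MAX) + 1_000;
    set.add(big);

    assert!(set.contains(42));
    assert!(set.contains(big));
}

Values past u32::MAX simply could not be stored in the Bitmap-backed set, which is what capped the old implementation at roughly 50 days of IDs at 1k writes/s.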
|
0c0a38c4844064057faee77b764a8afaedc90d23
|
Dom Dwyer
|
2022-10-19 11:47:03
|
more verbose shard reset logs
|
Adds a little more context to the "shard reset" logs.
| null |
refactor: more verbose shard reset logs
Adds a little more context to the "shard reset" logs.
|
diff --git a/ingester/src/stream_handler/handler.rs b/ingester/src/stream_handler/handler.rs
index 1b674fd2ea..3f32b20bcd 100644
--- a/ingester/src/stream_handler/handler.rs
+++ b/ingester/src/stream_handler/handler.rs
@@ -279,7 +279,8 @@ where
shard_index=%self.shard_index,
shard_id=%self.shard_id,
potential_data_loss=true,
- "reset stream"
+ "unable to read from desired sequence number offset \
+ - reset stream to oldest available data"
);
self.shard_reset_count.inc(1);
sequence_number_before_reset = Some(self.current_sequence_number);
@@ -293,7 +294,8 @@ where
shard_index=%self.shard_index,
shard_id=%self.shard_id,
potential_data_loss=true,
- "unable to read from desired sequence number offset"
+ "unable to read from desired sequence number offset \
+ - aborting ingest due to configuration"
);
self.shard_unknown_sequence_number_count.inc(1);
None
|
8bfccb74ab0540a06f81147143887b47a1fb6755
|
Jackson Newhouse
|
2024-12-17 16:38:12
|
Runtime and write-back improvements (#25672)
|
* Move processing engine invocation to a separate tokio task.
* Support writing back line protocol from Python via insert_line_protocol().
* Update structs to work with bincode.
| null |
feat(processing_engine): Runtime and write-back improvements (#25672)
* Move processing engine invocation to a separate tokio task.
* Support writing back line protocol from Python via insert_line_protocol().
* Update structs to work with bincode.
|
diff --git a/Cargo.lock b/Cargo.lock
index f69d622d6e..6e3399dfb0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3267,8 +3267,10 @@ dependencies = [
name = "influxdb3_py_api"
version = "0.1.0"
dependencies = [
+ "async-trait",
"influxdb3_catalog",
"influxdb3_wal",
+ "parking_lot",
"pyo3",
"schema",
]
@@ -3472,6 +3474,7 @@ dependencies = [
"parquet",
"parquet_file",
"pretty_assertions",
+ "pyo3",
"schema",
"serde",
"serde_json",
diff --git a/influxdb3/src/commands/serve.rs b/influxdb3/src/commands/serve.rs
index d72b56c35f..2f0511716d 100644
--- a/influxdb3/src/commands/serve.rs
+++ b/influxdb3/src/commands/serve.rs
@@ -475,20 +475,18 @@ pub async fn command(config: Config) -> Result<()> {
)
.map_err(Error::InitializeMetaCache)?;
- let write_buffer_impl = Arc::new(
- WriteBufferImpl::new(WriteBufferImplArgs {
- persister: Arc::clone(&persister),
- catalog: Arc::clone(&catalog),
- last_cache,
- meta_cache,
- time_provider: Arc::<SystemProvider>::clone(&time_provider),
- executor: Arc::clone(&exec),
- wal_config,
- parquet_cache,
- })
- .await
- .map_err(|e| Error::WriteBufferInit(e.into()))?,
- );
+ let write_buffer_impl = WriteBufferImpl::new(WriteBufferImplArgs {
+ persister: Arc::clone(&persister),
+ catalog: Arc::clone(&catalog),
+ last_cache,
+ meta_cache,
+ time_provider: Arc::<SystemProvider>::clone(&time_provider),
+ executor: Arc::clone(&exec),
+ wal_config,
+ parquet_cache,
+ })
+ .await
+ .map_err(|e| Error::WriteBufferInit(e.into()))?;
let telemetry_store = setup_telemetry_store(
&config.object_store_config,
diff --git a/influxdb3_catalog/src/catalog.rs b/influxdb3_catalog/src/catalog.rs
index 73026f7751..611c0892eb 100644
--- a/influxdb3_catalog/src/catalog.rs
+++ b/influxdb3_catalog/src/catalog.rs
@@ -108,6 +108,16 @@ pub enum Error {
},
#[error("Processing Engine Unimplemented: {}", feature_description)]
ProcessingEngineUnimplemented { feature_description: String },
+
+ #[error(
+ "Processing Engine Trigger {} not in DB {}",
+ trigger_name,
+ database_name
+ )]
+ ProcessingEngineTriggerNotFound {
+ database_name: String,
+ trigger_name: String,
+ },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -356,6 +366,21 @@ impl Catalog {
}
}
+ pub fn triggers(&self) -> Vec<(String, String)> {
+ let inner = self.inner.read();
+ let result = inner
+ .databases
+ .values()
+ .flat_map(|schema| {
+ schema
+ .processing_engine_triggers
+ .keys()
+ .map(move |key| (schema.name.to_string(), key.to_string()))
+ })
+ .collect();
+ result
+ }
+
pub fn inner(&self) -> &RwLock<InnerCatalog> {
&self.inner
}
@@ -891,10 +916,10 @@ impl TableDefinition {
.expect("tables defined from ops should not exceed column limits")
}
- pub(crate) fn check_and_add_new_fields<'a>(
- &'a self,
+ pub(crate) fn check_and_add_new_fields(
+ &self,
table_definition: &influxdb3_wal::TableDefinition,
- ) -> Result<Cow<'a, Self>> {
+ ) -> Result<Cow<'_, Self>> {
// validate the series key is the same
if table_definition.key != self.series_key {
return Err(Error::SeriesKeyMismatch {
diff --git a/influxdb3_catalog/src/serialize.rs b/influxdb3_catalog/src/serialize.rs
index 4c0b263b34..738ae3b76b 100644
--- a/influxdb3_catalog/src/serialize.rs
+++ b/influxdb3_catalog/src/serialize.rs
@@ -9,7 +9,7 @@ use influxdb3_id::DbId;
use influxdb3_id::SerdeVecMap;
use influxdb3_id::TableId;
use influxdb3_wal::{
- LastCacheDefinition, LastCacheValueColumnsDef, PluginDefinition, TriggerDefinition,
+ LastCacheDefinition, LastCacheValueColumnsDef, PluginDefinition, PluginType, TriggerDefinition,
};
use schema::InfluxColumnType;
use schema::InfluxFieldType;
@@ -163,7 +163,7 @@ struct ProcessingEnginePluginSnapshot {
pub plugin_name: String,
pub code: String,
pub function_name: String,
- pub plugin_type: String,
+ pub plugin_type: PluginType,
}
#[derive(Debug, Serialize, Deserialize)]
@@ -411,7 +411,7 @@ impl From<&PluginDefinition> for ProcessingEnginePluginSnapshot {
plugin_name: plugin.plugin_name.to_string(),
code: plugin.code.to_string(),
function_name: plugin.function_name.to_string(),
- plugin_type: serde_json::to_string(&plugin.plugin_type).unwrap(),
+ plugin_type: plugin.plugin_type,
}
}
}
@@ -419,10 +419,10 @@ impl From<&PluginDefinition> for ProcessingEnginePluginSnapshot {
impl From<ProcessingEnginePluginSnapshot> for PluginDefinition {
fn from(plugin: ProcessingEnginePluginSnapshot) -> Self {
Self {
- plugin_name: plugin.plugin_type.to_string(),
+ plugin_name: plugin.plugin_name.to_string(),
code: plugin.code.to_string(),
function_name: plugin.function_name.to_string(),
- plugin_type: serde_json::from_str(&plugin.plugin_type).expect("serialized plugin type"),
+ plugin_type: plugin.plugin_type,
}
}
}
diff --git a/influxdb3_py_api/Cargo.toml b/influxdb3_py_api/Cargo.toml
index 2af87b5de7..a76817489e 100644
--- a/influxdb3_py_api/Cargo.toml
+++ b/influxdb3_py_api/Cargo.toml
@@ -11,7 +11,9 @@ system-py = ["pyo3"]
[dependencies]
influxdb3_wal = { path = "../influxdb3_wal" }
influxdb3_catalog = {path = "../influxdb3_catalog"}
-schema = { workspace = true }
+async-trait.workspace = true
+schema.workspace = true
+parking_lot.workspace = true
[dependencies.pyo3]
version = "0.23.3"
diff --git a/influxdb3_py_api/src/system_py.rs b/influxdb3_py_api/src/system_py.rs
index e58a95a078..2c0b7e1caf 100644
--- a/influxdb3_py_api/src/system_py.rs
+++ b/influxdb3_py_api/src/system_py.rs
@@ -1,5 +1,6 @@
use influxdb3_catalog::catalog::{DatabaseSchema, TableDefinition};
use influxdb3_wal::{FieldData, Row, WriteBatch};
+use parking_lot::Mutex;
use pyo3::exceptions::PyValueError;
use pyo3::prelude::{PyAnyMethods, PyModule, PyModuleMethods};
use pyo3::{pyclass, pymethods, pymodule, Bound, IntoPyObject, PyErr, PyObject, PyResult, Python};
@@ -101,7 +102,7 @@ pub struct PyWriteBatch {
#[pymethods]
impl PyWriteBatch {
- fn get_iterator_for_table(&self, table_name: &str) -> PyResult<PyWriteBatchIterator> {
+ fn get_iterator_for_table(&self, table_name: &str) -> PyResult<Option<PyWriteBatchIterator>> {
// Find table ID from name
let table_id = self
.schema
@@ -112,9 +113,9 @@ impl PyWriteBatch {
})?;
// Get table chunks
- let chunks = self.write_batch.table_chunks.get(table_id).ok_or_else(|| {
- PyErr::new::<PyValueError, _>(format!("No data for table '{}'", table_name))
- })?;
+ let Some(chunks) = self.write_batch.table_chunks.get(table_id) else {
+ return Ok(None);
+ };
// Get table definition
let table_def = self.schema.tables.get(table_id).ok_or_else(|| {
@@ -124,7 +125,7 @@ impl PyWriteBatch {
))
})?;
- Ok(PyWriteBatchIterator {
+ Ok(Some(PyWriteBatchIterator {
table_definition: Arc::clone(table_def),
// TODO: avoid copying all the data at once.
rows: chunks
@@ -133,7 +134,22 @@ impl PyWriteBatch {
.flat_map(|chunk| chunk.rows.clone())
.collect(),
current_index: 0,
- })
+ }))
+ }
+}
+
+#[derive(Debug)]
+#[pyclass]
+pub struct PyLineProtocolOutput {
+ lines: Arc<Mutex<Vec<String>>>,
+}
+
+#[pymethods]
+impl PyLineProtocolOutput {
+ fn insert_line_protocol(&mut self, line: &str) -> PyResult<()> {
+ let mut lines = self.lines.lock();
+ lines.push(line.to_string());
+ Ok(())
}
}
@@ -143,13 +159,26 @@ impl PyWriteBatch {
table_name: &str,
setup_code: &str,
call_site: &str,
- ) -> PyResult<()> {
- let iterator = self.get_iterator_for_table(table_name)?;
+ ) -> PyResult<Vec<String>> {
+ let Some(iterator) = self.get_iterator_for_table(table_name)? else {
+ return Ok(Vec::new());
+ };
+
Python::with_gil(|py| {
py.run(&CString::new(setup_code)?, None, None)?;
let py_func = py.eval(&CString::new(call_site)?, None, None)?;
- py_func.call1((iterator,))?;
- Ok::<(), PyErr>(())
+
+ // Create the output collector with shared state
+ let lines = Arc::new(Mutex::new(Vec::new()));
+ let output = PyLineProtocolOutput {
+ lines: Arc::clone(&lines),
+ };
+
+ // Pass both iterator and output collector to the Python function
+ py_func.call1((iterator, output.into_pyobject(py)?))?;
+
+ let output_lines = lines.lock().clone();
+ Ok(output_lines)
})
}
}
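For illustration, a hypothetical plugin source showing the shape of the setup code and call site that the runner above evaluates; the function name, measurement, and tag are made up for this sketch, and only insert_line_protocol() is taken from the diff:

fn main() {
    // `setup_code` is run once; `call_site` must evaluate to a callable that
    // accepts the table iterator and a PyLineProtocolOutput collector, as in
    // the system_py.rs change above.
    let setup_code = r"
def process_writes(iterator, output):
    # Rows for the table arrive through `iterator`; anything passed to
    # insert_line_protocol() is collected on the Rust side and written back.
    output.insert_line_protocol('derived,source=plugin count=1i')
";
    let call_site = "process_writes";
    println!("setup:\n{setup_code}\ncall site: {call_site}");
}

The lines collected this way are what the runner returns as the Vec<String> seen in the new call-site signature above.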
diff --git a/influxdb3_server/src/http.rs b/influxdb3_server/src/http.rs
index 01da6f34a7..883898c626 100644
--- a/influxdb3_server/src/http.rs
+++ b/influxdb3_server/src/http.rs
@@ -984,11 +984,18 @@ where
self.write_buffer
.insert_trigger(
db.as_str(),
- trigger_name,
+ trigger_name.clone(),
plugin_name,
trigger_specification,
)
.await?;
+ self.write_buffer
+ .run_trigger(
+ Arc::clone(&self.write_buffer),
+ db.as_str(),
+ trigger_name.as_str(),
+ )
+ .await?;
Ok(Response::builder()
.status(StatusCode::OK)
.body(Body::empty())?)
diff --git a/influxdb3_server/src/lib.rs b/influxdb3_server/src/lib.rs
index 8f8dc498bb..9f3f6a2946 100644
--- a/influxdb3_server/src/lib.rs
+++ b/influxdb3_server/src/lib.rs
@@ -801,26 +801,24 @@ mod tests {
let sample_host_id = Arc::from("sample-host-id");
let instance_id = Arc::from("sample-instance-id");
let catalog = Arc::new(Catalog::new(sample_host_id, instance_id));
- let write_buffer_impl = Arc::new(
- influxdb3_write::write_buffer::WriteBufferImpl::new(
- influxdb3_write::write_buffer::WriteBufferImplArgs {
- persister: Arc::clone(&persister),
- catalog: Arc::clone(&catalog),
- last_cache: LastCacheProvider::new_from_catalog(Arc::clone(&catalog)).unwrap(),
- meta_cache: MetaCacheProvider::new_from_catalog(
- Arc::clone(&time_provider) as _,
- Arc::clone(&catalog),
- )
- .unwrap(),
- time_provider: Arc::clone(&time_provider) as _,
- executor: Arc::clone(&exec),
- wal_config: WalConfig::test_config(),
- parquet_cache: Some(parquet_cache),
- },
- )
- .await
- .unwrap(),
- );
+ let write_buffer_impl = influxdb3_write::write_buffer::WriteBufferImpl::new(
+ influxdb3_write::write_buffer::WriteBufferImplArgs {
+ persister: Arc::clone(&persister),
+ catalog: Arc::clone(&catalog),
+ last_cache: LastCacheProvider::new_from_catalog(Arc::clone(&catalog)).unwrap(),
+ meta_cache: MetaCacheProvider::new_from_catalog(
+ Arc::clone(&time_provider) as _,
+ Arc::clone(&catalog),
+ )
+ .unwrap(),
+ time_provider: Arc::clone(&time_provider) as _,
+ executor: Arc::clone(&exec),
+ wal_config: WalConfig::test_config(),
+ parquet_cache: Some(parquet_cache),
+ },
+ )
+ .await
+ .unwrap();
let sys_events_store = Arc::new(SysEventStore::new(Arc::<MockProvider>::clone(
&time_provider,
diff --git a/influxdb3_server/src/query_executor.rs b/influxdb3_server/src/query_executor.rs
index 615517f16f..ae12b1358c 100644
--- a/influxdb3_server/src/query_executor.rs
+++ b/influxdb3_server/src/query_executor.rs
@@ -682,29 +682,27 @@ mod tests {
let host_id = Arc::from("sample-host-id");
let instance_id = Arc::from("instance-id");
let catalog = Arc::new(Catalog::new(host_id, instance_id));
- let write_buffer_impl = Arc::new(
- WriteBufferImpl::new(WriteBufferImplArgs {
- persister,
- catalog: Arc::clone(&catalog),
- last_cache: LastCacheProvider::new_from_catalog(Arc::clone(&catalog)).unwrap(),
- meta_cache: MetaCacheProvider::new_from_catalog(
- Arc::<MockProvider>::clone(&time_provider),
- Arc::clone(&catalog),
- )
- .unwrap(),
- time_provider: Arc::<MockProvider>::clone(&time_provider),
- executor: Arc::clone(&exec),
- wal_config: WalConfig {
- gen1_duration: Gen1Duration::new_1m(),
- max_write_buffer_size: 100,
- flush_interval: Duration::from_millis(10),
- snapshot_size: 1,
- },
- parquet_cache: Some(parquet_cache),
- })
- .await
+ let write_buffer_impl = WriteBufferImpl::new(WriteBufferImplArgs {
+ persister,
+ catalog: Arc::clone(&catalog),
+ last_cache: LastCacheProvider::new_from_catalog(Arc::clone(&catalog)).unwrap(),
+ meta_cache: MetaCacheProvider::new_from_catalog(
+ Arc::<MockProvider>::clone(&time_provider),
+ Arc::clone(&catalog),
+ )
.unwrap(),
- );
+ time_provider: Arc::<MockProvider>::clone(&time_provider),
+ executor: Arc::clone(&exec),
+ wal_config: WalConfig {
+ gen1_duration: Gen1Duration::new_1m(),
+ max_write_buffer_size: 100,
+ flush_interval: Duration::from_millis(10),
+ snapshot_size: 1,
+ },
+ parquet_cache: Some(parquet_cache),
+ })
+ .await
+ .unwrap();
let persisted_files: Arc<PersistedFiles> = Arc::clone(&write_buffer_impl.persisted_files());
let telemetry_store = TelemetryStore::new_without_background_runners(persisted_files);
diff --git a/influxdb3_wal/src/lib.rs b/influxdb3_wal/src/lib.rs
index ff468cfeb0..06f3ca5a28 100644
--- a/influxdb3_wal/src/lib.rs
+++ b/influxdb3_wal/src/lib.rs
@@ -554,7 +554,7 @@ pub struct TriggerDefinition {
}
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
-#[serde(tag = "type", rename_all = "snake_case")]
+#[serde(rename_all = "snake_case")]
pub enum TriggerSpecificationDefinition {
SingleTableWalWrite { table_name: String },
AllTablesWalWrite,
diff --git a/influxdb3_write/Cargo.toml b/influxdb3_write/Cargo.toml
index e7e00aa75f..6c7fe29728 100644
--- a/influxdb3_write/Cargo.toml
+++ b/influxdb3_write/Cargo.toml
@@ -6,7 +6,7 @@ edition.workspace = true
license.workspace = true
[features]
-"system-py" = ["influxdb3_py_api/system-py"]
+"system-py" = ["influxdb3_py_api/system-py", "pyo3"]
[dependencies]
# Core Crates
@@ -61,6 +61,12 @@ tokio.workspace = true
url.workspace = true
uuid.workspace = true
+[dependencies.pyo3]
+version = "0.23.3"
+# this is necessary to automatically initialize the Python interpreter
+features = ["auto-initialize"]
+optional = true
+
[dev-dependencies]
# Core Crates
arrow_util.workspace = true
diff --git a/influxdb3_write/src/lib.rs b/influxdb3_write/src/lib.rs
index 2da430c0e4..8f624994c7 100644
--- a/influxdb3_write/src/lib.rs
+++ b/influxdb3_write/src/lib.rs
@@ -24,8 +24,8 @@ use influxdb3_id::ParquetFileId;
use influxdb3_id::SerdeVecMap;
use influxdb3_id::TableId;
use influxdb3_id::{ColumnId, DbId};
+use influxdb3_wal::MetaCacheDefinition;
use influxdb3_wal::{LastCacheDefinition, SnapshotSequenceNumber, WalFileSequenceNumber};
-use influxdb3_wal::{MetaCacheDefinition, PluginType, TriggerSpecificationDefinition};
use iox_query::QueryChunk;
use iox_time::Time;
use serde::{Deserialize, Serialize};
@@ -33,6 +33,7 @@ use std::fmt::Debug;
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error;
+use write_buffer::plugins::ProcessingEngineManager;
#[derive(Debug, Error)]
pub enum Error {
@@ -171,30 +172,6 @@ pub trait LastCacheManager: Debug + Send + Sync + 'static {
) -> Result<(), write_buffer::Error>;
}
-/// `[ProcessingEngineManager]` is used to interact with the processing engine,
-/// in particular plugins and triggers.
-///
-#[async_trait::async_trait]
-pub trait ProcessingEngineManager: Debug + Send + Sync + 'static {
- /// Inserts a plugin
- async fn insert_plugin(
- &self,
- db: &str,
- plugin_name: String,
- code: String,
- function_name: String,
- plugin_type: PluginType,
- ) -> Result<(), write_buffer::Error>;
-
- async fn insert_trigger(
- &self,
- db_name: &str,
- trigger_name: String,
- plugin_name: String,
- trigger_specification: TriggerSpecificationDefinition,
- ) -> Result<(), write_buffer::Error>;
-}
-
/// A single write request can have many lines in it. A writer can request to accept all lines that are valid, while
/// returning an error for any invalid lines. This is the error information for a single invalid line.
#[derive(Debug, Serialize)]
diff --git a/influxdb3_write/src/write_buffer/mod.rs b/influxdb3_write/src/write_buffer/mod.rs
index 9df9e54810..243a7a8aec 100644
--- a/influxdb3_write/src/write_buffer/mod.rs
+++ b/influxdb3_write/src/write_buffer/mod.rs
@@ -1,6 +1,8 @@
//! Implementation of an in-memory buffer for writes that persists data into a wal if it is configured.
pub mod persisted_files;
+#[allow(dead_code)]
+pub mod plugins;
pub mod queryable_buffer;
mod table_buffer;
pub mod validator;
@@ -9,7 +11,7 @@ use crate::persister::Persister;
use crate::write_buffer::persisted_files::PersistedFiles;
use crate::write_buffer::queryable_buffer::QueryableBuffer;
use crate::write_buffer::validator::WriteValidator;
-use crate::{chunk::ParquetChunk, DatabaseManager, ProcessingEngineManager};
+use crate::{chunk::ParquetChunk, write_buffer, DatabaseManager};
use crate::{
BufferedWriteRequest, Bufferer, ChunkContainer, LastCacheManager, MetaCacheManager,
ParquetFile, PersistedSnapshot, Precision, WriteBuffer, WriteLineError,
@@ -31,7 +33,7 @@ use influxdb3_catalog::catalog::{Catalog, DatabaseSchema};
use influxdb3_id::{ColumnId, DbId, TableId};
use influxdb3_wal::{
object_store::WalObjectStore, DeleteDatabaseDefinition, PluginDefinition, PluginType,
- TriggerDefinition, TriggerSpecificationDefinition,
+ TriggerDefinition, TriggerSpecificationDefinition, WalContents,
};
use influxdb3_wal::{
CatalogBatch, CatalogOp, LastCacheDefinition, LastCacheDelete, LastCacheSize,
@@ -45,6 +47,7 @@ use object_store::path::Path as ObjPath;
use object_store::{ObjectMeta, ObjectStore};
use observability_deps::tracing::{debug, error};
use parquet_file::storage::ParquetExecInput;
+use plugins::ProcessingEngineManager;
use queryable_buffer::QueryableBufferArgs;
use schema::Schema;
use std::sync::Arc;
@@ -52,6 +55,12 @@ use std::time::Duration;
use thiserror::Error;
use tokio::sync::watch::Receiver;
+#[cfg(feature = "system-py")]
+use {
+ crate::write_buffer::plugins::PluginContext,
+ influxdb3_catalog::catalog::Error::ProcessingEngineTriggerNotFound,
+};
+
#[derive(Debug, Error)]
pub enum Error {
#[error("parsing for line protocol failed")]
@@ -166,7 +175,7 @@ impl WriteBufferImpl {
wal_config,
parquet_cache,
}: WriteBufferImplArgs,
- ) -> Result<Self> {
+ ) -> Result<Arc<Self>> {
// load snapshots and replay the wal into the in memory buffer
let persisted_snapshots = persister
.load_snapshots(N_SNAPSHOTS_TO_LOAD_ON_START)
@@ -211,7 +220,7 @@ impl WriteBufferImpl {
)
.await?;
- Ok(Self {
+ let result = Arc::new(Self {
catalog,
parquet_cache,
persister,
@@ -222,7 +231,15 @@ impl WriteBufferImpl {
last_cache,
persisted_files,
buffer: queryable_buffer,
- })
+ });
+ let write_buffer: Arc<dyn WriteBuffer> = result.clone();
+ let triggers = result.catalog().triggers();
+ for (db_name, trigger_name) in triggers {
+ result
+ .run_trigger(Arc::clone(&write_buffer), &db_name, &trigger_name)
+ .await?;
+ }
+ Ok(result)
}
pub fn catalog(&self) -> Arc<Catalog> {
@@ -771,6 +788,46 @@ impl ProcessingEngineManager for WriteBufferImpl {
self.wal.write_ops(vec![wal_op]).await?;
Ok(())
}
+
+ #[cfg_attr(not(feature = "system-py"), allow(unused))]
+ async fn run_trigger(
+ &self,
+ write_buffer: Arc<dyn WriteBuffer>,
+ db_name: &str,
+ trigger_name: &str,
+ ) -> crate::Result<(), write_buffer::Error> {
+ #[cfg(feature = "system-py")]
+ {
+ let (_db_id, db_schema) =
+ self.catalog
+ .db_id_and_schema(db_name)
+ .ok_or_else(|| Error::DatabaseNotFound {
+ db_name: db_name.to_string(),
+ })?;
+ let trigger = db_schema
+ .processing_engine_triggers
+ .get(trigger_name)
+ .ok_or_else(|| ProcessingEngineTriggerNotFound {
+ database_name: db_name.to_string(),
+ trigger_name: trigger_name.to_string(),
+ })?
+ .clone();
+ let trigger_rx = self.buffer.subscribe_to_plugin_events();
+ let plugin_context = PluginContext {
+ trigger_rx,
+ write_buffer,
+ };
+ plugins::run_plugin(db_name.to_string(), trigger, plugin_context);
+ }
+
+ Ok(())
+ }
+}
+
+#[derive(Clone)]
+#[allow(unused)]
+pub(crate) enum PluginEvent {
+ WriteWalContents(Arc<WalContents>),
}
impl WriteBuffer for WriteBufferImpl {}
@@ -1299,7 +1356,7 @@ mod tests {
// do three writes to force a snapshot
do_writes(
db_name,
- &write_buffer,
+ write_buffer.as_ref(),
&[
TestWrite {
lp: "cpu bar=1",
@@ -1324,7 +1381,7 @@ mod tests {
// WAL period left from before:
do_writes(
db_name,
- &write_buffer,
+ write_buffer.as_ref(),
&[
TestWrite {
lp: "cpu bar=4",
@@ -1345,7 +1402,7 @@ mod tests {
// and finally, do two more, with a catalog update, forcing persistence
do_writes(
db_name,
- &write_buffer,
+ write_buffer.as_ref(),
&[
TestWrite {
lp: "cpu bar=6,asdf=true",
@@ -1578,7 +1635,7 @@ mod tests {
// do some writes to get a snapshot:
do_writes(
db_name,
- &wbuf,
+ wbuf.as_ref(),
&[
TestWrite {
lp: format!("{tbl_name},name=espresso price=2.50"),
@@ -1645,7 +1702,7 @@ mod tests {
// Do six writes to trigger a snapshot
do_writes(
db_name,
- &wbuf,
+ wbuf.as_ref(),
&[
TestWrite {
lp: format!("{tbl_name},name=espresso,type=drink price=2.50"),
@@ -1731,7 +1788,7 @@ mod tests {
// do some writes to get a snapshot:
do_writes(
db_name,
- &wbuf,
+ wbuf.as_ref(),
&[
TestWrite {
lp: format!("{tbl_name},name=espresso price=2.50"),
@@ -1809,7 +1866,7 @@ mod tests {
// do some writes to get a snapshot:
do_writes(
db_name,
- &wbuf,
+ wbuf.as_ref(),
&[
TestWrite {
lp: format!("{tbl_name},name=espresso price=2.50"),
@@ -1854,7 +1911,7 @@ mod tests {
// do some writes to get a snapshot:
do_writes(
db_name,
- &wbuf,
+ wbuf.as_ref(),
&[
TestWrite {
lp: format!("{tbl_name},name=espresso price=2.50"),
@@ -1929,7 +1986,7 @@ mod tests {
// make some writes to generate a snapshot:
do_writes(
db_name,
- &wbuf,
+ wbuf.as_ref(),
&[
TestWrite {
lp: format!(
@@ -2035,7 +2092,7 @@ mod tests {
// make some writes to generate a snapshot:
do_writes(
db_name,
- &wbuf,
+ wbuf.as_ref(),
&[
TestWrite {
lp: format!(
@@ -2254,7 +2311,11 @@ mod tests {
start: Time,
object_store: Arc<dyn ObjectStore>,
wal_config: WalConfig,
- ) -> (WriteBufferImpl, IOxSessionContext, Arc<dyn TimeProvider>) {
+ ) -> (
+ Arc<WriteBufferImpl>,
+ IOxSessionContext,
+ Arc<dyn TimeProvider>,
+ ) {
setup_cache_optional(start, object_store, wal_config, true).await
}
@@ -2263,7 +2324,11 @@ mod tests {
object_store: Arc<dyn ObjectStore>,
wal_config: WalConfig,
use_cache: bool,
- ) -> (WriteBufferImpl, IOxSessionContext, Arc<dyn TimeProvider>) {
+ ) -> (
+ Arc<WriteBufferImpl>,
+ IOxSessionContext,
+ Arc<dyn TimeProvider>,
+ ) {
let time_provider: Arc<dyn TimeProvider> = Arc::new(MockProvider::new(start));
let (object_store, parquet_cache) = if use_cache {
let (object_store, parquet_cache) = test_cached_obj_store_and_oracle(
diff --git a/influxdb3_write/src/write_buffer/plugins.rs b/influxdb3_write/src/write_buffer/plugins.rs
new file mode 100644
index 0000000000..0ba393c5b7
--- /dev/null
+++ b/influxdb3_write/src/write_buffer/plugins.rs
@@ -0,0 +1,190 @@
+use crate::write_buffer::PluginEvent;
+use crate::{write_buffer, WriteBuffer};
+use influxdb3_wal::{PluginType, TriggerDefinition, TriggerSpecificationDefinition};
+use observability_deps::tracing::warn;
+use std::fmt::Debug;
+use std::sync::Arc;
+use thiserror::Error;
+use tokio::sync::broadcast::error::RecvError;
+
+#[derive(Debug, Error)]
+pub enum Error {
+ #[error("couldn't find db")]
+ MissingDb,
+
+ #[cfg(feature = "system-py")]
+ #[error(transparent)]
+ PyError(#[from] pyo3::PyErr),
+
+ #[error(transparent)]
+ WriteBufferError(#[from] write_buffer::Error),
+}
+
+/// `[ProcessingEngineManager]` is used to interact with the processing engine,
+/// in particular plugins and triggers.
+///
+#[async_trait::async_trait]
+pub trait ProcessingEngineManager: Debug + Send + Sync + 'static {
+ /// Inserts a plugin
+ async fn insert_plugin(
+ &self,
+ db: &str,
+ plugin_name: String,
+ code: String,
+ function_name: String,
+ plugin_type: PluginType,
+ ) -> crate::Result<(), write_buffer::Error>;
+
+ async fn insert_trigger(
+ &self,
+ db_name: &str,
+ trigger_name: String,
+ plugin_name: String,
+ trigger_specification: TriggerSpecificationDefinition,
+ ) -> crate::Result<(), write_buffer::Error>;
+
+ /// Starts running the trigger, which will run in the background.
+ async fn run_trigger(
+ &self,
+ write_buffer: Arc<dyn WriteBuffer>,
+ db_name: &str,
+ trigger_name: &str,
+ ) -> crate::Result<(), write_buffer::Error>;
+}
+
+#[cfg(feature = "system-py")]
+pub(crate) fn run_plugin(
+ db_name: String,
+ trigger_definition: TriggerDefinition,
+ mut context: PluginContext,
+) {
+ let trigger_plugin = TriggerPlugin {
+ trigger_definition,
+ db_name,
+ };
+ tokio::task::spawn(async move {
+ trigger_plugin
+ .run_plugin(&mut context)
+ .await
+ .expect("trigger plugin failed");
+ });
+}
+
+pub(crate) struct PluginContext {
+ // tokio channel for inputs
+ pub(crate) trigger_rx: tokio::sync::broadcast::Receiver<PluginEvent>,
+ // handler to write data back to the DB.
+ pub(crate) write_buffer: Arc<dyn WriteBuffer>,
+}
+
+#[async_trait::async_trait]
+trait RunnablePlugin {
+ async fn process_event(
+ &self,
+ event: PluginEvent,
+ write_buffer: Arc<dyn WriteBuffer>,
+ ) -> Result<(), Error>;
+ async fn run_plugin(&self, context: &mut PluginContext) -> Result<(), Error> {
+ loop {
+ match context.trigger_rx.recv().await {
+ Err(RecvError::Closed) => {
+ break;
+ }
+ Err(RecvError::Lagged(_)) => {
+ warn!("plugin lagged");
+ }
+ Ok(event) => {
+ self.process_event(event, context.write_buffer.clone())
+ .await?;
+ }
+ }
+ }
+ Ok(())
+ }
+}
+#[derive(Debug)]
+struct TriggerPlugin {
+ trigger_definition: TriggerDefinition,
+ db_name: String,
+}
+
+#[cfg(feature = "system-py")]
+mod python_plugin {
+ use super::*;
+ use crate::Precision;
+ use data_types::NamespaceName;
+ use influxdb3_py_api::system_py::PyWriteBatch;
+ use influxdb3_wal::WalOp;
+ use iox_time::Time;
+ use std::time::SystemTime;
+
+ #[async_trait::async_trait]
+ impl RunnablePlugin for TriggerPlugin {
+ async fn process_event(
+ &self,
+ event: PluginEvent,
+ write_buffer: Arc<dyn WriteBuffer>,
+ ) -> Result<(), Error> {
+ let Some(schema) = write_buffer.catalog().db_schema(self.db_name.as_str()) else {
+ return Err(Error::MissingDb);
+ };
+
+ let mut output_lines = Vec::new();
+
+ match event {
+ PluginEvent::WriteWalContents(wal_contents) => {
+ for wal_op in &wal_contents.ops {
+ match wal_op {
+ WalOp::Write(write_batch) => {
+ let py_write_batch = PyWriteBatch {
+ // TODO: don't clone the write batch
+ write_batch: write_batch.clone(),
+ schema: Arc::clone(&schema),
+ };
+ match &self.trigger_definition.trigger {
+ TriggerSpecificationDefinition::SingleTableWalWrite {
+ table_name,
+ } => {
+ output_lines.extend(py_write_batch.call_against_table(
+ table_name,
+ &self.trigger_definition.plugin.code,
+ &self.trigger_definition.plugin.function_name,
+ )?);
+ }
+ TriggerSpecificationDefinition::AllTablesWalWrite => {
+ for table in schema.table_map.right_values() {
+ output_lines.extend(
+ py_write_batch.call_against_table(
+ table.as_ref(),
+ &self.trigger_definition.plugin.code,
+ &self.trigger_definition.plugin.function_name,
+ )?,
+ );
+ }
+ }
+ }
+ }
+ WalOp::Catalog(_) => {}
+ }
+ }
+ }
+ }
+ if !output_lines.is_empty() {
+ let ingest_time = SystemTime::now()
+ .duration_since(SystemTime::UNIX_EPOCH)
+ .unwrap();
+ write_buffer
+ .write_lp(
+ NamespaceName::new(self.db_name.to_string()).unwrap(),
+ output_lines.join("\n").as_str(),
+ Time::from_timestamp_nanos(ingest_time.as_nanos() as i64),
+ false,
+ Precision::Nanosecond,
+ )
+ .await?;
+ }
+
+ Ok(())
+ }
+ }
+}
diff --git a/influxdb3_write/src/write_buffer/queryable_buffer.rs b/influxdb3_write/src/write_buffer/queryable_buffer.rs
index e5b9ec75f5..242e55e001 100644
--- a/influxdb3_write/src/write_buffer/queryable_buffer.rs
+++ b/influxdb3_write/src/write_buffer/queryable_buffer.rs
@@ -3,6 +3,7 @@ use crate::paths::ParquetFilePath;
use crate::persister::Persister;
use crate::write_buffer::persisted_files::PersistedFiles;
use crate::write_buffer::table_buffer::TableBuffer;
+use crate::write_buffer::PluginEvent;
use crate::{ParquetFile, ParquetFileId, PersistedSnapshot};
use arrow::record_batch::RecordBatch;
use async_trait::async_trait;
@@ -20,8 +21,6 @@ use influxdb3_cache::meta_cache::MetaCacheProvider;
use influxdb3_cache::parquet_cache::{CacheRequest, ParquetCacheOracle};
use influxdb3_catalog::catalog::{Catalog, DatabaseSchema};
use influxdb3_id::{DbId, TableId};
-#[cfg(feature = "system-py")]
-use influxdb3_py_api::system_py::PyWriteBatch;
use influxdb3_wal::{CatalogOp, SnapshotDetails, WalContents, WalFileNotifier, WalOp, WriteBatch};
use iox_query::chunk_statistics::{create_chunk_statistics, NoColumnRanges};
use iox_query::exec::Executor;
@@ -29,15 +28,15 @@ use iox_query::frontend::reorg::ReorgPlanner;
use iox_query::QueryChunk;
use object_store::path::Path;
use observability_deps::tracing::{error, info};
-use parking_lot::RwLock;
+use parking_lot::{Mutex, RwLock};
use parquet::format::FileMetaData;
use schema::sort::SortKey;
use schema::Schema;
use std::any::Any;
use std::sync::Arc;
use std::time::Duration;
-use tokio::sync::oneshot;
use tokio::sync::oneshot::Receiver;
+use tokio::sync::{broadcast, oneshot};
#[derive(Debug)]
pub struct QueryableBuffer {
@@ -52,6 +51,7 @@ pub struct QueryableBuffer {
/// Sends a notification to this watch channel whenever a snapshot info is persisted
persisted_snapshot_notify_rx: tokio::sync::watch::Receiver<Option<PersistedSnapshot>>,
persisted_snapshot_notify_tx: tokio::sync::watch::Sender<Option<PersistedSnapshot>>,
+ plugin_event_tx: Mutex<Option<broadcast::Sender<PluginEvent>>>,
}
pub struct QueryableBufferArgs {
@@ -90,6 +90,7 @@ impl QueryableBuffer {
parquet_cache,
persisted_snapshot_notify_rx,
persisted_snapshot_notify_tx,
+ plugin_event_tx: Mutex::new(None),
}
}
@@ -368,11 +369,28 @@ impl QueryableBuffer {
let mut buffer = self.buffer.write();
buffer.db_to_table.remove(db_id);
}
+
+ #[cfg(feature = "system-py")]
+ pub(crate) fn subscribe_to_plugin_events(&self) -> broadcast::Receiver<PluginEvent> {
+ let mut sender = self.plugin_event_tx.lock();
+
+ if sender.is_none() {
+ let (tx, rx) = broadcast::channel(1024);
+ *sender = Some(tx);
+ return rx;
+ }
+ sender.as_ref().unwrap().subscribe()
+ }
}
#[async_trait]
impl WalFileNotifier for QueryableBuffer {
fn notify(&self, write: WalContents) {
+ if let Some(sender) = self.plugin_event_tx.lock().as_ref() {
+ if let Err(err) = sender.send(PluginEvent::WriteWalContents(Arc::new(write.clone()))) {
+ error!(%err, "Error sending WAL content to plugins");
+ }
+ }
self.buffer_contents(write)
}
@@ -381,6 +399,11 @@ impl WalFileNotifier for QueryableBuffer {
write: WalContents,
snapshot_details: SnapshotDetails,
) -> Receiver<SnapshotDetails> {
+ if let Some(sender) = self.plugin_event_tx.lock().as_ref() {
+ if let Err(err) = sender.send(PluginEvent::WriteWalContents(Arc::new(write.clone()))) {
+ error!(%err, "Error sending WAL content to plugins");
+ }
+ }
self.buffer_contents_and_persist_snapshotted_data(write, snapshot_details)
.await
}
@@ -508,65 +531,6 @@ impl BufferState {
.db_schema_by_id(&write_batch.database_id)
.expect("database should exist");
- // TODO: factor this out
- #[cfg(feature = "system-py")]
- {
- use influxdb3_wal::TriggerSpecificationDefinition;
- use influxdb3_wal::TriggerSpecificationDefinition::SingleTableWalWrite;
- let write_tables: hashbrown::HashSet<_> = write_batch
- .table_chunks
- .keys()
- .map(|key| {
- let table_name = db_schema.table_map.get_by_left(key).unwrap();
- table_name.to_string()
- })
- .collect();
- let triggers: Vec<_> = db_schema
- .processing_engine_triggers
- .values()
- .filter_map(|trigger| match &trigger.trigger {
- SingleTableWalWrite { table_name } => {
- if write_tables.contains(table_name.as_str()) {
- Some((trigger, vec![table_name.clone()]))
- } else {
- None
- }
- }
- TriggerSpecificationDefinition::AllTablesWalWrite => {
- if !write_tables.is_empty() {
- Some((
- trigger,
- write_tables.iter().map(ToString::to_string).collect(),
- ))
- } else {
- None
- }
- }
- })
- .collect();
- if !triggers.is_empty() {
- // Create PyWriteBatch instance
- let py_write_batch = PyWriteBatch {
- write_batch: write_batch.clone(),
- schema: db_schema.clone(),
- };
- for (trigger, write_tables) in triggers {
- for table in &write_tables {
- if let Err(err) = py_write_batch.call_against_table(
- table,
- trigger.plugin.code.as_str(),
- trigger.plugin.function_name.as_str(),
- ) {
- error!(
- "failed to call trigger {} with error {}",
- trigger.trigger_name, err
- )
- }
- }
- }
- }
- }
-
let database_buffer = self.db_to_table.entry(write_batch.database_id).or_default();
for (table_id, table_chunks) in write_batch.table_chunks {
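
The hunks above route WAL notifications to plugins through a tokio broadcast channel that is created lazily on first subscription and drained in a loop that tolerates lagging receivers. A minimal standalone sketch of that fan-out pattern, assuming only the `tokio` crate (with the `macros`, `rt-multi-thread`, and `sync` features); the `Event` type and the handler are illustrative stand-ins, not this crate's API:

```rust
use tokio::sync::broadcast;

/// Illustrative stand-in for `PluginEvent::WriteWalContents`.
#[derive(Clone, Debug)]
enum Event {
    WalWrite(String),
}

#[tokio::main]
async fn main() {
    // Bounded channel: slow subscribers may observe `RecvError::Lagged`.
    let (tx, _rx) = broadcast::channel::<Event>(1024);

    // Each plugin gets its own receiver, like `subscribe_to_plugin_events`.
    let mut plugin_rx = tx.subscribe();
    let plugin = tokio::spawn(async move {
        loop {
            match plugin_rx.recv().await {
                Ok(Event::WalWrite(lp)) => println!("plugin saw write: {lp}"),
                Err(broadcast::error::RecvError::Lagged(n)) => {
                    eprintln!("plugin lagged, skipped {n} events");
                }
                Err(_) => break, // channel closed: all senders dropped
            }
        }
    });

    // The WAL notifier side: ignore send errors if no subscriber is left.
    for i in 0..3 {
        let _ = tx.send(Event::WalWrite(format!("cpu bar={i}")));
    }
    drop(tx); // closing the last sender ends the subscriber loop
    plugin.await.unwrap();
}
```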
|
956d7bcee488bf057ea041d3a2a566f9fef71dc9
|
Carol (Nichols || Goulding)
|
2023-03-30 12:39:03
|
"revert: Merge pull request #7369 from influxdata/cn/parquet-file-saved-status"
|
This reverts commit 0d7393f2c1133413af862cf4f70603eba7b82a46.
| null |
revert: "revert: Merge pull request #7369 from influxdata/cn/parquet-file-saved-status"
This reverts commit 0d7393f2c1133413af862cf4f70603eba7b82a46.
|
diff --git a/compactor2/src/compactor.rs b/compactor2/src/compactor.rs
index 87a3642676..b57635e109 100644
--- a/compactor2/src/compactor.rs
+++ b/compactor2/src/compactor.rs
@@ -56,7 +56,12 @@ impl Compactor2 {
tokio::select! {
_ = shutdown_captured.cancelled() => {}
_ = async {
- compact(config.partition_concurrency, config.partition_timeout, Arc::clone(&job_semaphore), &components).await;
+ compact(
+ config.partition_concurrency,
+ config.partition_timeout,
+ Arc::clone(&job_semaphore),
+ &components
+ ).await;
info!("compactor done");
} => {}
diff --git a/compactor2/src/components/hardcoded.rs b/compactor2/src/components/hardcoded.rs
index 36cc60e620..2d1020780e 100644
--- a/compactor2/src/components/hardcoded.rs
+++ b/compactor2/src/components/hardcoded.rs
@@ -191,7 +191,10 @@ pub fn hardcoded_components(config: &Config) -> Arc<Components> {
.filter(|kind| {
// use explicit match statement so we never forget to add new variants
match kind {
- ErrorKind::OutOfMemory | ErrorKind::Timeout | ErrorKind::Unknown => true,
+ ErrorKind::OutOfMemory
+ | ErrorKind::Timeout
+ | ErrorKind::ConcurrentModification
+ | ErrorKind::Unknown => true,
ErrorKind::ObjectStore => false,
}
})
diff --git a/compactor2/src/driver.rs b/compactor2/src/driver.rs
index 831f0b6324..8ea00f9242 100644
--- a/compactor2/src/driver.rs
+++ b/compactor2/src/driver.rs
@@ -15,7 +15,7 @@ use crate::{
},
error::{DynError, ErrorKind, SimpleError},
file_classification::{FileClassification, FilesForProgress, FilesToSplitOrCompact},
- partition_info::PartitionInfo,
+ partition_info::{PartitionInfo, SavedParquetFileState},
PlanIR,
};
@@ -219,6 +219,12 @@ async fn try_compact_partition(
let mut files_next = files_later;
// loop for each "Branch"
for branch in branches {
+ // Keep the current state as a check to make sure this is the only compactor modifying this partition's
+ // files. Check that the catalog state matches this before committing and, if it doesn't match, throw away
+ // the compaction work we've done.
+ let saved_parquet_file_state =
+ fetch_and_save_parquet_file_state(&components, partition_id).await;
+
let input_paths: Vec<ParquetFilePath> =
branch.iter().map(ParquetFilePath::from).collect();
@@ -276,12 +282,13 @@ async fn try_compact_partition(
let (created_files, upgraded_files) = update_catalog(
Arc::clone(&components),
partition_id,
+ saved_parquet_file_state,
files_to_delete,
upgrade,
created_file_params,
target_level,
)
- .await;
+ .await?;
// Extend created files, upgraded files and files_to_keep to files_next
files_next.extend(created_files);
@@ -409,17 +416,41 @@ async fn upload_files_to_object_store(
.collect()
}
+async fn fetch_and_save_parquet_file_state(
+ components: &Components,
+ partition_id: PartitionId,
+) -> SavedParquetFileState {
+ let catalog_files = components.partition_files_source.fetch(partition_id).await;
+ SavedParquetFileState::from(&catalog_files)
+}
+
/// Update the catalog to create, soft delete and upgrade corresponding given input
/// to provided target level
/// Return created and upgraded files
async fn update_catalog(
components: Arc<Components>,
partition_id: PartitionId,
+ saved_parquet_file_state: SavedParquetFileState,
files_to_delete: Vec<ParquetFile>,
files_to_upgrade: Vec<ParquetFile>,
file_params_to_create: Vec<ParquetFileParams>,
target_level: CompactionLevel,
-) -> (Vec<ParquetFile>, Vec<ParquetFile>) {
+) -> Result<(Vec<ParquetFile>, Vec<ParquetFile>), DynError> {
+ let current_parquet_file_state =
+ fetch_and_save_parquet_file_state(&components, partition_id).await;
+
+ if saved_parquet_file_state != current_parquet_file_state {
+ // Someone else has changed the files in the catalog since we started compacting; throw away our work and
+ // don't commit anything.
+ return Err(Box::new(SimpleError::new(
+ ErrorKind::ConcurrentModification,
+ format!(
+ "Parquet files for partition {partition_id} have been modified since compaction started. \
+ Saved: {saved_parquet_file_state:?} != Current: {current_parquet_file_state:?}"
+ ),
+ )));
+ }
+
let created_ids = components
.commit
.commit(
@@ -447,5 +478,5 @@ async fn update_catalog(
})
.collect::<Vec<_>>();
- (created_file_params, upgraded_files)
+ Ok((created_file_params, upgraded_files))
}
diff --git a/compactor2/src/error.rs b/compactor2/src/error.rs
index 3ae7be1b2b..aed1982a35 100644
--- a/compactor2/src/error.rs
+++ b/compactor2/src/error.rs
@@ -23,6 +23,12 @@ pub enum ErrorKind {
/// Partition took too long.
Timeout,
+ /// Concurrent modification.
+ ///
+ /// This compactor instance expected to be the only process working on this partition's Parquet files, but the
+ /// Parquet files were modified while the compactor was doing work, so the work was thrown away and not committed.
+ ConcurrentModification,
+
/// Unknown/unexpected error.
///
/// This will likely mark the affected partition as "skipped" and the compactor will no longer touch it.
@@ -36,6 +42,7 @@ impl ErrorKind {
Self::ObjectStore,
Self::OutOfMemory,
Self::Timeout,
+ Self::ConcurrentModification,
Self::Unknown,
]
}
@@ -46,6 +53,7 @@ impl ErrorKind {
Self::ObjectStore => "object_store",
Self::OutOfMemory => "out_of_memory",
Self::Timeout => "timeout",
+ Self::ConcurrentModification => "concurrent_modification",
Self::Unknown => "unknown",
}
}
diff --git a/compactor2/src/partition_info.rs b/compactor2/src/partition_info.rs
index ada8b2da31..54835d0fe2 100644
--- a/compactor2/src/partition_info.rs
+++ b/compactor2/src/partition_info.rs
@@ -2,7 +2,10 @@
use std::sync::Arc;
-use data_types::{NamespaceId, PartitionId, PartitionKey, Table, TableSchema};
+use data_types::{
+ CompactionLevel, NamespaceId, ParquetFile, ParquetFileId, PartitionId, PartitionKey, Table,
+ TableSchema,
+};
use schema::sort::SortKey;
/// Information about the Partition being compacted
@@ -36,3 +39,149 @@ impl PartitionInfo {
self.table_schema.column_count()
}
}
+
+/// Saved snapshot of a partition's Parquet files' IDs and compaction levels. Save this state at the beginning of a
+/// compaction operation, then just before committing ask for the state again. If the two saved states are identical,
+/// we assume no other compactor instance has compacted this partition and this compactor instance should commit its
+/// work. If the two saved states differ, throw away the work and do not commit as the Parquet files have been changed
+/// by some other process while this compactor instance was working.
+#[derive(Debug, Clone, PartialEq)]
+pub(crate) struct SavedParquetFileState {
+ ids_and_levels: Vec<(ParquetFileId, CompactionLevel)>,
+}
+
+impl<'a, T> From<T> for SavedParquetFileState
+where
+ T: IntoIterator<Item = &'a ParquetFile>,
+{
+ fn from(parquet_files: T) -> Self {
+ let mut ids_and_levels: Vec<_> = parquet_files
+ .into_iter()
+ .map(|pf| (pf.id, pf.compaction_level))
+ .collect();
+
+ ids_and_levels.sort();
+
+ Self { ids_and_levels }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use iox_tests::ParquetFileBuilder;
+
+ #[test]
+ fn saved_state_sorts_by_parquet_file_id() {
+ let pf_id1_level_0 = ParquetFileBuilder::new(1)
+ .with_compaction_level(CompactionLevel::Initial)
+ .build();
+ let pf_id2_level_2 = ParquetFileBuilder::new(2)
+ .with_compaction_level(CompactionLevel::Final)
+ .build();
+ let pf_id3_level_1 = ParquetFileBuilder::new(3)
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .build();
+
+ let saved_state_1 =
+ SavedParquetFileState::from([&pf_id1_level_0, &pf_id2_level_2, &pf_id3_level_1]);
+ let saved_state_2 =
+ SavedParquetFileState::from([&pf_id3_level_1, &pf_id1_level_0, &pf_id2_level_2]);
+
+ assert_eq!(saved_state_1, saved_state_2);
+ }
+
+ #[test]
+ fn both_empty_parquet_files() {
+ let saved_state_1 = SavedParquetFileState::from([]);
+ let saved_state_2 = SavedParquetFileState::from([]);
+
+ assert_eq!(saved_state_1, saved_state_2);
+ }
+
+ #[test]
+ fn one_empty_parquet_files() {
+ let pf_id1_level_0 = ParquetFileBuilder::new(1)
+ .with_compaction_level(CompactionLevel::Initial)
+ .build();
+
+ let saved_state_1 = SavedParquetFileState::from([&pf_id1_level_0]);
+ let saved_state_2 = SavedParquetFileState::from([]);
+
+ assert_ne!(saved_state_1, saved_state_2);
+ }
+
+ #[test]
+ fn missing_files_not_equal() {
+ let pf_id1_level_0 = ParquetFileBuilder::new(1)
+ .with_compaction_level(CompactionLevel::Initial)
+ .build();
+ let pf_id2_level_2 = ParquetFileBuilder::new(2)
+ .with_compaction_level(CompactionLevel::Final)
+ .build();
+ let pf_id3_level_1 = ParquetFileBuilder::new(3)
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .build();
+
+ let saved_state_1 =
+ SavedParquetFileState::from([&pf_id1_level_0, &pf_id2_level_2, &pf_id3_level_1]);
+ let saved_state_2 = SavedParquetFileState::from([&pf_id3_level_1, &pf_id1_level_0]);
+
+ assert_ne!(saved_state_1, saved_state_2);
+ }
+
+ #[test]
+ fn additional_files_not_equal() {
+ let pf_id1_level_0 = ParquetFileBuilder::new(1)
+ .with_compaction_level(CompactionLevel::Initial)
+ .build();
+ let pf_id2_level_2 = ParquetFileBuilder::new(2)
+ .with_compaction_level(CompactionLevel::Final)
+ .build();
+ let pf_id3_level_1 = ParquetFileBuilder::new(3)
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .build();
+
+ let saved_state_1 = SavedParquetFileState::from([&pf_id3_level_1, &pf_id1_level_0]);
+ let saved_state_2 =
+ SavedParquetFileState::from([&pf_id1_level_0, &pf_id2_level_2, &pf_id3_level_1]);
+
+ assert_ne!(saved_state_1, saved_state_2);
+ }
+
+ #[test]
+ fn changed_compaction_level_not_equal() {
+ let pf_id1_level_0 = ParquetFileBuilder::new(1)
+ .with_compaction_level(CompactionLevel::Initial)
+ .build();
+ let pf_id1_level_1 = ParquetFileBuilder::new(1)
+ .with_compaction_level(CompactionLevel::FileNonOverlapped)
+ .build();
+ let pf_id2_level_2 = ParquetFileBuilder::new(2)
+ .with_compaction_level(CompactionLevel::Final)
+ .build();
+
+ let saved_state_1 = SavedParquetFileState::from([&pf_id1_level_0, &pf_id2_level_2]);
+ let saved_state_2 = SavedParquetFileState::from([&pf_id1_level_1, &pf_id2_level_2]);
+
+ assert_ne!(saved_state_1, saved_state_2);
+ }
+
+ #[test]
+ fn same_number_of_files_different_ids_not_equal() {
+ let pf_id1_level_0 = ParquetFileBuilder::new(1)
+ .with_compaction_level(CompactionLevel::Initial)
+ .build();
+ let pf_id2_level_0 = ParquetFileBuilder::new(2)
+ .with_compaction_level(CompactionLevel::Initial)
+ .build();
+ let pf_id3_level_2 = ParquetFileBuilder::new(3)
+ .with_compaction_level(CompactionLevel::Final)
+ .build();
+
+ let saved_state_1 = SavedParquetFileState::from([&pf_id1_level_0, &pf_id3_level_2]);
+ let saved_state_2 = SavedParquetFileState::from([&pf_id2_level_0, &pf_id3_level_2]);
+
+ assert_ne!(saved_state_1, saved_state_2);
+ }
+}
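
The `SavedParquetFileState` doc comment above describes a plain optimistic-concurrency check: snapshot the partition's file IDs and compaction levels, do the compaction work, re-fetch the state, and commit only if the two snapshots match. A self-contained, std-only sketch of that check with IDs and levels reduced to plain integers (all names here are illustrative):

```rust
/// Sorted (file id, compaction level) pairs; equal snapshots mean no
/// concurrent modification happened while we were compacting.
#[derive(Debug, Clone, PartialEq, Eq)]
struct Snapshot {
    ids_and_levels: Vec<(u64, u8)>,
}

impl Snapshot {
    fn from_files(files: &[(u64, u8)]) -> Self {
        let mut ids_and_levels = files.to_vec();
        ids_and_levels.sort();
        Self { ids_and_levels }
    }
}

fn commit_if_unchanged(
    saved: &Snapshot,
    current_catalog_files: &[(u64, u8)],
) -> Result<(), String> {
    let current = Snapshot::from_files(current_catalog_files);
    if *saved != current {
        // Someone else touched the partition's files: throw the work away.
        return Err(format!(
            "concurrent modification: saved {saved:?} != current {current:?}"
        ));
    }
    // ... perform the real catalog commit here ...
    Ok(())
}

fn main() {
    let saved = Snapshot::from_files(&[(1, 0), (2, 2)]);
    // Same files in a different order: still equal, commit proceeds.
    assert!(commit_if_unchanged(&saved, &[(2, 2), (1, 0)]).is_ok());
    // A file changed compaction level in the meantime: abort.
    assert!(commit_if_unchanged(&saved, &[(1, 1), (2, 2)]).is_err());
}
```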
|
71380196367b6b980dfebb24e8d7fa7acf82cddc
|
Michael Gattozzi
|
2024-05-02 13:39:20
|
Upgrade to Rust 1.78.0 (#24953)
|
This fixes new lints that have come up in the latest version of clippy and moves
.cargo/config to .cargo/config.toml as the previous filename is now deprecated.
| null |
chore: Upgrade to Rust 1.78.0 (#24953)
This fixes new lints that have come up in the latest version of clippy and moves
.cargo/config to .cargo/config.toml as the previous filename is now deprecated.
|
diff --git a/.cargo/config b/.cargo/config.toml
similarity index 100%
rename from .cargo/config
rename to .cargo/config.toml
diff --git a/influxdb3_server/src/http.rs b/influxdb3_server/src/http.rs
index ef3562002d..107dcc0747 100644
--- a/influxdb3_server/src/http.rs
+++ b/influxdb3_server/src/http.rs
@@ -890,8 +890,6 @@ pub(crate) async fn route_request<W: WriteBuffer, Q: QueryExecutor, T: TimeProvi
mut req: Request<Body>,
) -> Result<Response<Body>, Infallible>
where
- W: WriteBuffer,
- Q: QueryExecutor,
Error: From<<Q as QueryExecutor>::Error>,
{
if let Err(e) = http_server.authorize_request(&mut req).await {
diff --git a/influxdb3_server/src/http/v1.rs b/influxdb3_server/src/http/v1.rs
index c6c13d69a6..d9cb00ea98 100644
--- a/influxdb3_server/src/http/v1.rs
+++ b/influxdb3_server/src/http/v1.rs
@@ -378,7 +378,7 @@ impl QueryResponseStream {
let mut columns = vec!["".to_string(); self.column_map.len()];
self.column_map
.iter()
- .for_each(|(k, i)| columns[*i] = k.to_owned());
+ .for_each(|(k, i)| k.clone_into(&mut columns[*i]));
columns
}
diff --git a/influxdb3_write/src/catalog.rs b/influxdb3_write/src/catalog.rs
index 594c7f69be..b70953f12f 100644
--- a/influxdb3_write/src/catalog.rs
+++ b/influxdb3_write/src/catalog.rs
@@ -186,7 +186,7 @@ impl InnerCatalog {
#[cfg(test)]
pub fn db_exists(&self, db_name: &str) -> bool {
- self.databases.get(db_name).is_some()
+ self.databases.contains_key(db_name)
}
}
diff --git a/influxdb3_write/src/paths.rs b/influxdb3_write/src/paths.rs
index b883f2ff60..4e6a1f0a9e 100644
--- a/influxdb3_write/src/paths.rs
+++ b/influxdb3_write/src/paths.rs
@@ -1,6 +1,7 @@
use crate::SegmentId;
use chrono::prelude::*;
use object_store::path::Path as ObjPath;
+use std::fmt;
use std::ops::Deref;
use std::path::Path;
use std::path::PathBuf;
@@ -109,9 +110,9 @@ impl SegmentWalFilePath {
}
}
-impl ToString for SegmentWalFilePath {
- fn to_string(&self) -> String {
- self.0.to_string_lossy().into_owned()
+impl fmt::Display for SegmentWalFilePath {
+ fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+ write!(f, "{}", self.0.to_string_lossy())
}
}
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
index 7aac9b8f1b..1ec3ae9268 100644
--- a/rust-toolchain.toml
+++ b/rust-toolchain.toml
@@ -1,3 +1,3 @@
[toolchain]
-channel = "1.77.0"
+channel = "1.78.0"
components = ["rustfmt", "clippy", "rust-analyzer"]
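
One of the clippy-driven changes above replaces a hand-written `impl ToString` with `impl fmt::Display`; the standard library's blanket `impl<T: Display> ToString for T` then supplies `to_string` for free. A tiny std-only sketch of the idiom (the wrapper type is an illustrative stand-in, not the crate's `SegmentWalFilePath`):

```rust
use std::fmt;
use std::path::PathBuf;

struct WalFilePath(PathBuf);

impl fmt::Display for WalFilePath {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0.to_string_lossy())
    }
}

fn main() {
    let p = WalFilePath(PathBuf::from("/wal/segment-001.wal"));
    // `to_string` comes for free from the blanket `impl<T: Display> ToString for T`.
    assert_eq!(p.to_string(), "/wal/segment-001.wal");
    println!("{p}");
}
```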
|
303c9e439853b92b5e2e003773a386af180b15aa
|
Dom Dwyer
|
2023-01-11 17:15:04
|
fix e2e test
|
This test was relying on a graceful shutdown of the ingester to drive a
WAL replay, restoring the buffered state at startup.
Now that shutdown causes the data to be persisted rather than replayed, this
no longer works.
| null |
test: fix e2e test
This test was relying on a graceful shutdown of the ingester to drive a
WAL replay, restoring the buffered state at startup.
Now that shutdown causes the data to be persisted rather than replayed, this
no longer works.
|
diff --git a/influxdb_iox/tests/end_to_end_cases/ingester.rs b/influxdb_iox/tests/end_to_end_cases/ingester.rs
index 7fa6d90398..8f2d320ee2 100644
--- a/influxdb_iox/tests/end_to_end_cases/ingester.rs
+++ b/influxdb_iox/tests/end_to_end_cases/ingester.rs
@@ -267,6 +267,14 @@ mod kafkaless_rpc_write {
// Restart the ingester and ensure it gets a new UUID
cluster.restart_ingester().await;
+
+ // Populate the ingester with some data so it returns a successful
+ // response containing the UUID.
+ let lp = format!("{},tag1=A,tag2=B val=42i 123456", table_name);
+ let response = cluster.write_to_router(lp).await;
+ assert_eq!(response.status(), StatusCode::NO_CONTENT);
+
+ // Query for the new UUID and assert it has changed.
let mut performed_query = querier_flight.do_get(query).await.unwrap().into_inner();
let (msg, app_metadata) = next_message(&mut performed_query).await.unwrap();
assert!(matches!(msg, DecodedPayload::None), "{:?}", msg);
|
1cb36526925c72b2ccdc840b69b5034393b3a2fb
|
Trevor Hilton
|
2024-05-17 11:21:01
|
add SystemSchemaProvider to QueryExecutor (#24990)
|
A shell for the `system` table provider was added to the QueryExecutorImpl
which currently does not do anything, but will enable us to tie the
different system table providers into it.
The QueryLog was elevated from the `Database`, i.e., namespace provider,
to the QueryExecutorImpl, so that it lives across queries.
| null |
feat: add SystemSchemaProvider to QueryExecutor (#24990)
A shell for the `system` table provider was added to the QueryExecutorImpl
which currently does not do anything, but will enable us to tie the
different system table providers into it.
The QueryLog was elevated from the `Database`, i.e., namespace provider,
to the QueryExecutorImpl, so that it lives across queries.
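
A minimal sketch of the lifetime change described here: the executor owns a single shared query log and clones the `Arc` into each short-lived per-query handle, so log entries survive across queries instead of dying with each `Database`. The types are illustrative stand-ins, not the actual `iox_query` `QueryLog` API:

```rust
use std::sync::{Arc, Mutex};

/// Stand-in for the shared query log.
#[derive(Default)]
struct QueryLog {
    entries: Mutex<Vec<String>>,
}

impl QueryLog {
    fn push(&self, query: &str) {
        self.entries.lock().unwrap().push(query.to_string());
    }
    fn count(&self) -> usize {
        self.entries.lock().unwrap().len()
    }
}

/// Long-lived executor: owns the log once, for all queries.
struct Executor {
    query_log: Arc<QueryLog>,
}

/// Short-lived per-query handle: shares the log via `Arc`.
struct Database {
    query_log: Arc<QueryLog>,
}

impl Executor {
    fn database(&self) -> Database {
        Database {
            query_log: Arc::clone(&self.query_log),
        }
    }
}

fn main() {
    let exec = Executor {
        query_log: Arc::new(QueryLog::default()),
    };
    for q in ["SELECT 1", "SELECT 2"] {
        let db = exec.database(); // dropped after each query
        db.query_log.push(q);
    }
    // The log outlived both `Database` handles.
    assert_eq!(exec.query_log.count(), 2);
}
```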
|
diff --git a/influxdb3_server/src/query_executor.rs b/influxdb3_server/src/query_executor.rs
index 2b01cecb02..d27d193875 100644
--- a/influxdb3_server/src/query_executor.rs
+++ b/influxdb3_server/src/query_executor.rs
@@ -55,6 +55,7 @@ pub struct QueryExecutorImpl<W> {
exec: Arc<Executor>,
datafusion_config: Arc<HashMap<String, String>>,
query_execution_semaphore: Arc<InstrumentedAsyncSemaphore>,
+ query_log: Arc<QueryLog>,
}
impl<W: WriteBuffer> QueryExecutorImpl<W> {
@@ -72,12 +73,19 @@ impl<W: WriteBuffer> QueryExecutorImpl<W> {
));
let query_execution_semaphore =
Arc::new(semaphore_metrics.new_semaphore(concurrent_query_limit));
+ // TODO Fine tune this number or make configurable
+ const QUERY_LOG_LIMIT: usize = 1_000;
+ let query_log = Arc::new(QueryLog::new(
+ QUERY_LOG_LIMIT,
+ Arc::new(iox_time::SystemProvider::new()),
+ ));
Self {
catalog,
write_buffer,
exec,
datafusion_config,
query_execution_semaphore,
+ query_log,
}
}
}
@@ -282,7 +290,7 @@ impl<W: WriteBuffer> QueryDatabase for QueryExecutorImpl<W> {
&self,
name: &str,
span: Option<Span>,
- _include_debug_info_tables: bool,
+ include_debug_info_tables: bool,
) -> Result<Option<Arc<dyn QueryNamespace>>, DataFusionError> {
let _span_recorder = SpanRecorder::new(span);
@@ -297,6 +305,8 @@ impl<W: WriteBuffer> QueryDatabase for QueryExecutorImpl<W> {
Arc::clone(&self.write_buffer) as _,
Arc::clone(&self.exec),
Arc::clone(&self.datafusion_config),
+ Arc::clone(&self.query_log),
+ include_debug_info_tables,
))))
}
@@ -312,13 +322,14 @@ impl<W: WriteBuffer> QueryDatabase for QueryExecutorImpl<W> {
}
}
-#[derive(Debug)]
+#[derive(Debug, Clone)]
pub struct Database<B> {
db_schema: Arc<DatabaseSchema>,
write_buffer: Arc<B>,
exec: Arc<Executor>,
datafusion_config: Arc<HashMap<String, String>>,
query_log: Arc<QueryLog>,
+ system_schema_provider: Arc<SystemSchemaProvider>,
}
impl<B: WriteBuffer> Database<B> {
@@ -327,13 +338,13 @@ impl<B: WriteBuffer> Database<B> {
write_buffer: Arc<B>,
exec: Arc<Executor>,
datafusion_config: Arc<HashMap<String, String>>,
+ query_log: Arc<QueryLog>,
+ include_debug_info_tables: bool,
) -> Self {
- // TODO Fine tune this number
- const QUERY_LOG_LIMIT: usize = 10;
-
- let query_log = Arc::new(QueryLog::new(
- QUERY_LOG_LIMIT,
- Arc::new(iox_time::SystemProvider::new()),
+ let system_schema_provider = Arc::new(SystemSchemaProvider::new(
+ write_buffer.catalog(),
+ Arc::clone(&query_log),
+ include_debug_info_tables,
));
Self {
db_schema,
@@ -341,6 +352,18 @@ impl<B: WriteBuffer> Database<B> {
exec,
datafusion_config,
query_log,
+ system_schema_provider,
+ }
+ }
+
+ fn from_namespace(db: &Self) -> Self {
+ Self {
+ db_schema: Arc::clone(&db.db_schema),
+ write_buffer: Arc::clone(&db.write_buffer),
+ exec: Arc::clone(&db.exec),
+ datafusion_config: Arc::clone(&db.datafusion_config),
+ query_log: Arc::clone(&db.query_log),
+ system_schema_provider: Arc::clone(&db.system_schema_provider),
}
}
@@ -404,17 +427,10 @@ impl<B: WriteBuffer> QueryNamespace for Database<B> {
span_ctx: Option<SpanContext>,
_config: Option<&QueryConfig>,
) -> IOxSessionContext {
- let qdb = Self::new(
- Arc::clone(&self.db_schema),
- Arc::clone(&self.write_buffer),
- Arc::clone(&self.exec),
- Arc::clone(&self.datafusion_config),
- );
-
let mut cfg = self
.exec
.new_session_config()
- .with_default_catalog(Arc::new(qdb))
+ .with_default_catalog(Arc::new(Self::from_namespace(self)))
.with_span_context(span_ctx);
for (k, v) in self.datafusion_config.as_ref() {
@@ -437,15 +453,8 @@ impl<B: WriteBuffer> CatalogProvider for Database<B> {
fn schema(&self, name: &str) -> Option<Arc<dyn SchemaProvider>> {
info!("CatalogProvider schema {}", name);
- let qdb = Self::new(
- Arc::clone(&self.db_schema),
- Arc::clone(&self.write_buffer),
- Arc::clone(&self.exec),
- Arc::clone(&self.datafusion_config),
- );
-
match name {
- DEFAULT_SCHEMA => Some(Arc::new(qdb)),
+ DEFAULT_SCHEMA => Some(Arc::new(Self::from_namespace(self))),
_ => None,
}
}
@@ -486,7 +495,6 @@ impl<B: WriteBuffer> QueryTable<B> {
filters: &[Expr],
_limit: Option<usize>,
) -> Result<Vec<Arc<dyn QueryChunk>>, DataFusionError> {
- // TODO - this is only pulling from write buffer, and not parquet?
self.write_buffer.get_table_chunks(
&self.db_schema.name,
self.name.as_ref(),
@@ -545,3 +553,37 @@ impl<B: WriteBuffer> TableProvider for QueryTable<B> {
provider.scan(ctx, projection, &filters, limit).await
}
}
+
+const _QUERIES_TABLE: &str = "queries";
+const _PARQUET_FILES_TABLE: &str = "parquet_files";
+
+struct SystemSchemaProvider {
+ tables: HashMap<&'static str, Arc<dyn TableProvider>>,
+}
+
+impl std::fmt::Debug for SystemSchemaProvider {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ let mut keys = self.tables.keys().copied().collect::<Vec<_>>();
+ keys.sort_unstable();
+
+ f.debug_struct("SystemSchemaProvider")
+ .field("tables", &keys.join(", "))
+ .finish()
+ }
+}
+
+impl SystemSchemaProvider {
+ fn new(_catalog: Arc<Catalog>, _query_log: Arc<QueryLog>, include_debug_info: bool) -> Self {
+ let tables = HashMap::new();
+ if include_debug_info {
+ // Using todo!() here causes gRPC integration tests to fail, likely because they
+ // enable debug mode by default, thus entering this if block. So, just leaving this
+ // here in lieu of todo!().
+ //
+ // Eventually, we will implement the queries and parquet_files tables and they will
+ // be injected to the provider's table hashmap here...
+ info!("TODO - gather system tables");
+ }
+ Self { tables }
+ }
+}
|
52e54e0f8d84e7f99290965175142831e80a7c58
|
Marco Neumann
|
2023-03-29 08:24:17
|
more aggressive `CombineChunks` (#7355)
|
Try to combine chunks even when not all Union-arms/inputs are
combinable. This will later help to transform
```yaml
---
union:
- parquet:
files: [f1]
- parquet:
files: [f2]
- dedup:
parquet:
files: [f3]
```
into
```yaml
---
union:
- parquet:
files: [f1, f2]
- dedup:
parquet:
files: [f3]
```
Helps #6098.
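
A minimal sketch of the rewrite this describes: split the union's arms into plain chunk scans (combinable) and everything else, merge the combinable arms into one, and re-attach the rest untouched. Plain enums stand in for `ExecutionPlan` nodes here, so this shows only the shape of the transformation, not the DataFusion API:

```rust
/// Stand-in for a union arm; `Chunks(files)` is combinable, `Other` is not.
#[derive(Debug, PartialEq)]
enum Arm {
    Chunks(Vec<String>),
    Other(String),
}

fn combine_chunks(arms: Vec<Arm>) -> Vec<Arm> {
    // Split the arms exactly like the optimizer splits `UnionExec` inputs.
    let (chunk_arms, other_arms): (Vec<_>, Vec<_>) = arms
        .into_iter()
        .partition(|arm| matches!(arm, Arm::Chunks(_)));

    if chunk_arms.is_empty() {
        return other_arms; // nothing to combine, leave the plan alone
    }

    // Merge all combinable arms into a single chunk scan ...
    let combined: Vec<String> = chunk_arms
        .into_iter()
        .flat_map(|arm| match arm {
            Arm::Chunks(files) => files,
            Arm::Other(_) => unreachable!("filtered out above"),
        })
        .collect();

    // ... and keep the non-combinable arms as-is alongside it.
    let mut out = vec![Arm::Chunks(combined)];
    out.extend(other_arms);
    out
}

fn main() {
    let plan = vec![
        Arm::Chunks(vec!["f1".into()]),
        Arm::Chunks(vec!["f2".into()]),
        Arm::Other("dedup(f3)".into()),
    ];
    assert_eq!(
        combine_chunks(plan),
        vec![
            Arm::Chunks(vec!["f1".into(), "f2".into()]),
            Arm::Other("dedup(f3)".into()),
        ]
    );
}
```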
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
feat: more aggressive `CombineChunks` (#7355)
Try to combine chunks even when not all Union-arms/inputs are
combinable. This will later help to transform
```yaml
---
union:
- parquet:
files: [f1]
- parquet:
files: [f2]
- dedup:
parquet:
files: [f3]
```
into
```yaml
---
union:
- parquet:
files: [f1, f2]
- dedup:
parquet:
files: [f3]
```
Helps #6098.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/iox_query/src/physical_optimizer/combine_chunks.rs b/iox_query/src/physical_optimizer/combine_chunks.rs
index 2840250865..1417f3ea68 100644
--- a/iox_query/src/physical_optimizer/combine_chunks.rs
+++ b/iox_query/src/physical_optimizer/combine_chunks.rs
@@ -3,9 +3,9 @@ use std::sync::Arc;
use datafusion::{
common::tree_node::{Transformed, TreeNode},
config::ConfigOptions,
- error::Result,
+ error::{DataFusionError, Result},
physical_optimizer::PhysicalOptimizerRule,
- physical_plan::ExecutionPlan,
+ physical_plan::{union::UnionExec, ExecutionPlan},
};
use predicate::Predicate;
@@ -35,14 +35,34 @@ impl PhysicalOptimizerRule for CombineChunks {
config: &ConfigOptions,
) -> Result<Arc<dyn ExecutionPlan>> {
plan.transform_up(&|plan| {
- if let Some((schema, chunks, output_sort_key)) = extract_chunks(plan.as_ref()) {
- return Ok(Transformed::Yes(chunks_to_physical_nodes(
- &schema,
- output_sort_key.as_ref(),
- chunks,
- Predicate::new(),
- config.execution.target_partitions,
- )));
+ if let Some(union_exec) = plan.as_any().downcast_ref::<UnionExec>() {
+ let (inputs_with_chunks, inputs_other): (Vec<_>, Vec<_>) = union_exec
+ .inputs()
+ .iter()
+ .cloned()
+ .partition(|plan| {
+ extract_chunks(plan.as_ref()).is_some()
+ });
+
+ if inputs_with_chunks.is_empty() {
+ return Ok(Transformed::No(plan));
+ }
+ let union_of_chunks = UnionExec::new(inputs_with_chunks);
+
+ if let Some((schema, chunks, output_sort_key)) = extract_chunks(&union_of_chunks) {
+ let union_of_chunks = chunks_to_physical_nodes(
+ &schema,
+ output_sort_key.as_ref(),
+ chunks,
+ Predicate::new(),
+ config.execution.target_partitions,
+ );
+ let Some(union_of_chunks) = union_of_chunks.as_any().downcast_ref::<UnionExec>() else {
+ return Err(DataFusionError::External(format!("Expected chunks_to_physical_nodes to produce UnionExec but got {union_of_chunks:?}").into()));
+ };
+ let final_union = UnionExec::new(union_of_chunks.inputs().iter().cloned().chain(inputs_other.into_iter()).collect());
+ return Ok(Transformed::Yes(Arc::new(final_union)));
+ }
}
Ok(Transformed::No(plan))
@@ -60,14 +80,17 @@ impl PhysicalOptimizerRule for CombineChunks {
#[cfg(test)]
mod tests {
- use datafusion::physical_plan::union::UnionExec;
+ use datafusion::{
+ physical_plan::{expressions::Literal, filter::FilterExec, union::UnionExec},
+ scalar::ScalarValue,
+ };
use crate::{physical_optimizer::test_util::OptimizationTest, test::TestChunk, QueryChunkMeta};
use super::*;
#[test]
- fn test_combine() {
+ fn test_combine_single_union_tree() {
let chunk1 = TestChunk::new("table").with_id(1);
let chunk2 = TestChunk::new("table").with_id(2).with_dummy_parquet_file();
let chunk3 = TestChunk::new("table").with_id(3);
@@ -113,4 +136,110 @@ mod tests {
"###
);
}
+
+ #[test]
+ fn test_combine_some_union_arms() {
+ let chunk1 = TestChunk::new("table").with_id(1).with_dummy_parquet_file();
+ let chunk2 = TestChunk::new("table").with_id(1).with_dummy_parquet_file();
+ let chunk3 = TestChunk::new("table").with_id(1).with_dummy_parquet_file();
+ let schema = chunk1.schema().as_arrow();
+ let plan = Arc::new(UnionExec::new(vec![
+ chunks_to_physical_nodes(&schema, None, vec![Arc::new(chunk1)], Predicate::new(), 2),
+ chunks_to_physical_nodes(&schema, None, vec![Arc::new(chunk2)], Predicate::new(), 2),
+ Arc::new(
+ FilterExec::try_new(
+ Arc::new(Literal::new(ScalarValue::from(false))),
+ chunks_to_physical_nodes(
+ &schema,
+ None,
+ vec![Arc::new(chunk3)],
+ Predicate::new(),
+ 2,
+ ),
+ )
+ .unwrap(),
+ ),
+ ]));
+ let opt = CombineChunks::default();
+ insta::assert_yaml_snapshot!(
+ OptimizationTest::new(plan, opt),
+ @r###"
+ ---
+ input:
+ - " UnionExec"
+ - " UnionExec"
+ - " ParquetExec: limit=None, partitions={1 group: [[1.parquet]]}, projection=[]"
+ - " UnionExec"
+ - " ParquetExec: limit=None, partitions={1 group: [[1.parquet]]}, projection=[]"
+ - " FilterExec: false"
+ - " UnionExec"
+ - " ParquetExec: limit=None, partitions={1 group: [[1.parquet]]}, projection=[]"
+ output:
+ Ok:
+ - " UnionExec"
+ - " ParquetExec: limit=None, partitions={2 groups: [[1.parquet], [1.parquet]]}, projection=[]"
+ - " FilterExec: false"
+ - " UnionExec"
+ - " ParquetExec: limit=None, partitions={1 group: [[1.parquet]]}, projection=[]"
+ "###
+ );
+ }
+
+ #[test]
+ fn test_no_chunks() {
+ let chunk1 = TestChunk::new("table").with_id(1);
+ let schema = chunk1.schema().as_arrow();
+ let plan = chunks_to_physical_nodes(&schema, None, vec![], Predicate::new(), 2);
+ let opt = CombineChunks::default();
+ let mut config = ConfigOptions::default();
+ config.execution.target_partitions = 2;
+ insta::assert_yaml_snapshot!(
+ OptimizationTest::new_with_config(plan, opt, &config),
+ @r###"
+ ---
+ input:
+ - " EmptyExec: produce_one_row=false"
+ output:
+ Ok:
+ - " EmptyExec: produce_one_row=false"
+ "###
+ );
+ }
+
+ #[test]
+ fn test_no_valid_arms() {
+ let chunk1 = TestChunk::new("table").with_id(1);
+ let schema = chunk1.schema().as_arrow();
+ let plan = Arc::new(UnionExec::new(vec![Arc::new(
+ FilterExec::try_new(
+ Arc::new(Literal::new(ScalarValue::from(false))),
+ chunks_to_physical_nodes(
+ &schema,
+ None,
+ vec![Arc::new(chunk1)],
+ Predicate::new(),
+ 2,
+ ),
+ )
+ .unwrap(),
+ )]));
+ let opt = CombineChunks::default();
+ insta::assert_yaml_snapshot!(
+ OptimizationTest::new(plan, opt),
+ @r###"
+ ---
+ input:
+ - " UnionExec"
+ - " FilterExec: false"
+ - " UnionExec"
+ - " RecordBatchesExec: batches_groups=1 batches=0 total_rows=0"
+ output:
+ Ok:
+ - " UnionExec"
+ - " FilterExec: false"
+ - " UnionExec"
+ - " RecordBatchesExec: batches_groups=1 batches=0 total_rows=0"
+ "###
+ );
+ }
}
|
3dabccd84bac95740c5e44be6defdc32e3937290
|
wiedld
|
2023-07-11 08:41:12
|
scheduler job_status() (#8202)
|
* feat(idpe-17789): scheduler job_status() (#8121)
This block of work moves into the scheduler some of the specific downstream actions affiliated with compaction outcomes. Which responsibilities stay in the compactor versus move to the scheduler roughly followed this heuristic: whether the action (a) has an impact on global catalog state (i.e. commits and partition skipping), (b) is logging affiliated with compactor health (e.g. PartitionDoneSink logging outcomes) versus system health (e.g. logging commits), or (c) reports to the scheduler any errors encountered during compaction. This boundary is subject to change as we move forward.
Also, a noted caveat (TODO) on this commit: we have a CompactionJob which is used to track work handed off to each compactor. Currently it still uses the partition_id for tracking, but the follow-up PR will start moving the compactor toward more CompactionJob UUID awareness.
| null |
feat(idpe-17789): scheduler job_status() (#8202)
* feat(idpe-17789): scheduler job_status() (#8121)
This block of work moves into the scheduler some of the specific downstream actions affiliated with compaction outcomes. Which responsibilities stay in the compactor versus move to the scheduler roughly followed this heuristic: whether the action (a) has an impact on global catalog state (i.e. commits and partition skipping), (b) is logging affiliated with compactor health (e.g. PartitionDoneSink logging outcomes) versus system health (e.g. logging commits), or (c) reports to the scheduler any errors encountered during compaction. This boundary is subject to change as we move forward.
Also, a noted caveat (TODO) on this commit: we have a CompactionJob which is used to track work handed off to each compactor. Currently it still uses the partition_id for tracking, but the follow-up PR will start moving the compactor toward more CompactionJob UUID awareness.
|
diff --git a/Cargo.lock b/Cargo.lock
index 660dd90e99..5d27b0f6e0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1001,11 +1001,15 @@ dependencies = [
"async-trait",
"backoff",
"data_types",
+ "futures",
"iox_catalog",
"iox_tests",
"iox_time",
+ "itertools 0.11.0",
+ "metric",
"observability_deps",
"sharder",
+ "test_helpers",
"tokio",
"uuid",
"workspace-hack",
diff --git a/compactor/src/components/commit.rs b/compactor/src/components/commit.rs
new file mode 100644
index 0000000000..67ad62d5f0
--- /dev/null
+++ b/compactor/src/components/commit.rs
@@ -0,0 +1,55 @@
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use compactor_scheduler::{
+ Commit, CommitUpdate, CompactionJob, CompactionJobStatus, CompactionJobStatusResult,
+ CompactionJobStatusVariant, Scheduler,
+};
+use data_types::{CompactionLevel, ParquetFile, ParquetFileId, ParquetFileParams, PartitionId};
+
+#[derive(Debug)]
+pub(crate) struct CommitToScheduler {
+ scheduler: Arc<dyn Scheduler>,
+}
+
+impl CommitToScheduler {
+ pub(crate) fn new(scheduler: Arc<dyn Scheduler>) -> Self {
+ Self { scheduler }
+ }
+}
+
+#[async_trait]
+impl Commit for CommitToScheduler {
+ async fn commit(
+ &self,
+ partition_id: PartitionId,
+ delete: &[ParquetFile],
+ upgrade: &[ParquetFile],
+ create: &[ParquetFileParams],
+ target_level: CompactionLevel,
+ ) -> Vec<ParquetFileId> {
+ match self
+ .scheduler
+ .job_status(CompactionJobStatus {
+ job: CompactionJob::new(partition_id),
+ status: CompactionJobStatusVariant::Update(CommitUpdate::new(
+ partition_id,
+ delete.into(),
+ upgrade.into(),
+ create.into(),
+ target_level,
+ )),
+ })
+ .await
+ {
+ Ok(CompactionJobStatusResult::UpdatedParquetFiles(ids)) => ids,
+ _ => panic!("commit failed"),
+ }
+ }
+}
+
+impl std::fmt::Display for CommitToScheduler {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "CommitToScheduler")
+ }
+}
diff --git a/compactor/src/components/hardcoded.rs b/compactor/src/components/hardcoded.rs
index 672869940b..9554c3bd4f 100644
--- a/compactor/src/components/hardcoded.rs
+++ b/compactor/src/components/hardcoded.rs
@@ -4,7 +4,9 @@
use std::{sync::Arc, time::Duration};
-use compactor_scheduler::{create_scheduler, PartitionsSource, Scheduler};
+use compactor_scheduler::{
+ create_scheduler, Commit, PartitionDoneSink, PartitionsSource, Scheduler,
+};
use data_types::CompactionLevel;
use object_store::memory::InMemory;
@@ -12,11 +14,7 @@ use crate::{config::Config, error::ErrorKind, object_store::ignore_writes::Ignor
use super::{
changed_files_filter::logging::LoggingChangedFiles,
- combos::{throttle_partition::throttle_partition, unique_partitions::unique_partitions},
- commit::{
- catalog::CatalogCommit, logging::LoggingCommitWrapper, metrics::MetricsCommitWrapper,
- mock::MockCommit, Commit,
- },
+ commit::CommitToScheduler,
df_plan_exec::{
dedicated::DedicatedDataFusionPlanExec, noop::NoopDataFusionPlanExec, DataFusionPlanExec,
},
@@ -39,9 +37,8 @@ use super::{
},
parquet_files_sink::{dispatch::DispatchParquetFilesSink, ParquetFilesSink},
partition_done_sink::{
- catalog::CatalogPartitionDoneSink, error_kind::ErrorKindPartitionDoneSinkWrapper,
- logging::LoggingPartitionDoneSinkWrapper, metrics::MetricsPartitionDoneSinkWrapper,
- mock::MockPartitionDoneSink, PartitionDoneSink,
+ error_kind::ErrorKindPartitionDoneSinkWrapper, logging::LoggingPartitionDoneSinkWrapper,
+ metrics::MetricsPartitionDoneSinkWrapper, outcome::PartitionDoneSinkToScheduler,
},
partition_files_source::{
catalog::{CatalogPartitionFilesSource, QueryRateLimiter},
@@ -93,6 +90,8 @@ pub fn hardcoded_components(config: &Config) -> Arc<Components> {
config.scheduler_config.clone(),
Arc::clone(&config.catalog),
Arc::clone(&config.time_provider),
+ Arc::clone(&config.metric_registry),
+ config.shadow_mode,
);
let (partitions_source, commit, partition_done_sink) =
make_partitions_source_commit_partition_sink(config, Arc::clone(&scheduler));
@@ -126,49 +125,14 @@ fn make_partitions_source_commit_partition_sink(
Arc<dyn Commit>,
Arc<dyn PartitionDoneSink>,
) {
- let partitions_source = ScheduledPartitionsSource::new(scheduler);
-
- let partition_done_sink: Arc<dyn PartitionDoneSink> = if config.shadow_mode {
- Arc::new(MockPartitionDoneSink::new())
- } else {
- Arc::new(CatalogPartitionDoneSink::new(
- config.backoff_config.clone(),
- Arc::clone(&config.catalog),
- ))
- };
-
- let commit: Arc<dyn Commit> = if config.shadow_mode {
- Arc::new(MockCommit::new())
- } else {
- Arc::new(CatalogCommit::new(
- config.backoff_config.clone(),
- Arc::clone(&config.catalog),
- ))
- };
-
- let commit = if let Some(commit_wrapper) = config.commit_wrapper.as_ref() {
- commit_wrapper.wrap(commit)
- } else {
- commit
- };
-
- let (partitions_source, partition_done_sink) =
- unique_partitions(partitions_source, partition_done_sink, 1);
+ let partitions_source = ScheduledPartitionsSource::new(Arc::clone(&scheduler));
- let (partitions_source, commit, partition_done_sink) = throttle_partition(
- partitions_source,
- commit,
- partition_done_sink,
- Arc::clone(&config.time_provider),
- Duration::from_secs(60),
- 1,
- );
+ let commit = CommitToScheduler::new(Arc::clone(&scheduler));
- let commit = Arc::new(LoggingCommitWrapper::new(MetricsCommitWrapper::new(
- commit,
- &config.metric_registry,
- )));
+ let partition_done_sink = PartitionDoneSinkToScheduler::new(Arc::clone(&scheduler));
+ // compactors are responsible for error classification
+ // and any future decisions regarding graceful shutdown
let partition_done_sink: Arc<dyn PartitionDoneSink> = if config.all_errors_are_fatal {
Arc::new(partition_done_sink)
} else {
@@ -185,6 +149,7 @@ fn make_partitions_source_commit_partition_sink(
})
.copied()
.collect(),
+ scheduler,
))
};
let partition_done_sink = Arc::new(LoggingPartitionDoneSinkWrapper::new(
@@ -210,7 +175,7 @@ fn make_partitions_source_commit_partition_sink(
))
};
- (partitions_source, commit, partition_done_sink)
+ (partitions_source, Arc::new(commit), partition_done_sink)
}
fn make_partition_stream(
diff --git a/compactor/src/components/mod.rs b/compactor/src/components/mod.rs
index 1ce2eea844..2cf1dc9638 100644
--- a/compactor/src/components/mod.rs
+++ b/compactor/src/components/mod.rs
@@ -1,19 +1,19 @@
use std::sync::Arc;
+use compactor_scheduler::{Commit, PartitionDoneSink};
+
use self::{
- changed_files_filter::ChangedFilesFilter, commit::Commit, df_plan_exec::DataFusionPlanExec,
+ changed_files_filter::ChangedFilesFilter, df_plan_exec::DataFusionPlanExec,
df_planner::DataFusionPlanner, divide_initial::DivideInitial, file_classifier::FileClassifier,
ir_planner::IRPlanner, parquet_files_sink::ParquetFilesSink,
- partition_done_sink::PartitionDoneSink, partition_files_source::PartitionFilesSource,
- partition_filter::PartitionFilter, partition_info_source::PartitionInfoSource,
- partition_stream::PartitionStream,
+ partition_files_source::PartitionFilesSource, partition_filter::PartitionFilter,
+ partition_info_source::PartitionInfoSource, partition_stream::PartitionStream,
post_classification_partition_filter::PostClassificationPartitionFilter,
round_info_source::RoundInfoSource, round_split::RoundSplit, scratchpad::ScratchpadGen,
};
pub mod changed_files_filter;
-pub mod combos;
-pub mod commit;
+pub(crate) mod commit;
pub mod df_plan_exec;
pub mod df_planner;
pub mod divide_initial;
@@ -61,7 +61,7 @@ pub struct Components {
pub post_classification_partition_filter: Arc<dyn PostClassificationPartitionFilter>,
/// Records "partition is done" status for given partition.
pub partition_done_sink: Arc<dyn PartitionDoneSink>,
- /// Commits changes (i.e. deletion and creation) to the catalog.
+ /// Commits changes (i.e. deletion and creation).
pub commit: Arc<dyn Commit>,
/// Creates `PlanIR` that describes what files should be compacted and updated
pub ir_planner: Arc<dyn IRPlanner>,
diff --git a/compactor/src/components/partition_done_sink/error_kind.rs b/compactor/src/components/partition_done_sink/error_kind.rs
index 0848906215..f06f141436 100644
--- a/compactor/src/components/partition_done_sink/error_kind.rs
+++ b/compactor/src/components/partition_done_sink/error_kind.rs
@@ -1,12 +1,14 @@
-use std::{collections::HashSet, fmt::Display};
+use std::{collections::HashSet, fmt::Display, sync::Arc};
use async_trait::async_trait;
+use compactor_scheduler::{
+ CompactionJob, CompactionJobStatus, CompactionJobStatusResult, CompactionJobStatusVariant,
+ ErrorKind as SchedulerErrorKind, PartitionDoneSink, Scheduler,
+};
use data_types::PartitionId;
use crate::error::{DynError, ErrorKind, ErrorKindExt};
-use super::PartitionDoneSink;
-
#[derive(Debug)]
pub struct ErrorKindPartitionDoneSinkWrapper<T>
where
@@ -14,14 +16,19 @@ where
{
kind: HashSet<ErrorKind>,
inner: T,
+ scheduler: Arc<dyn Scheduler>,
}
impl<T> ErrorKindPartitionDoneSinkWrapper<T>
where
T: PartitionDoneSink,
{
- pub fn new(inner: T, kind: HashSet<ErrorKind>) -> Self {
- Self { kind, inner }
+ pub fn new(inner: T, kind: HashSet<ErrorKind>, scheduler: Arc<dyn Scheduler>) -> Self {
+ Self {
+ kind,
+ inner,
+ scheduler,
+ }
}
}
@@ -45,6 +52,24 @@ where
match res {
Ok(()) => self.inner.record(partition, Ok(())).await,
Err(e) if self.kind.contains(&e.classify()) => {
+ let scheduler_error = match SchedulerErrorKind::from(e.classify()) {
+ SchedulerErrorKind::OutOfMemory => SchedulerErrorKind::OutOfMemory,
+ SchedulerErrorKind::ObjectStore => SchedulerErrorKind::ObjectStore,
+ SchedulerErrorKind::Timeout => SchedulerErrorKind::Timeout,
+ SchedulerErrorKind::Unknown(_) => SchedulerErrorKind::Unknown(e.to_string()),
+ };
+
+ match self
+ .scheduler
+ .job_status(CompactionJobStatus {
+ job: CompactionJob::new(partition),
+ status: CompactionJobStatusVariant::Error(scheduler_error),
+ })
+ .await
+ {
+ Ok(CompactionJobStatusResult::Ack) => {}
+ _ => panic!("unexpected result from scheduler"),
+ }
self.inner.record(partition, Err(e)).await;
}
_ => {}
@@ -56,9 +81,10 @@ where
mod tests {
use std::{collections::HashMap, sync::Arc};
- use crate::components::partition_done_sink::mock::MockPartitionDoneSink;
-
+ use compactor_scheduler::{create_test_scheduler, MockPartitionDoneSink};
use datafusion::error::DataFusionError;
+ use iox_tests::TestCatalog;
+ use iox_time::{MockProvider, Time};
use object_store::Error as ObjectStoreError;
use super::*;
@@ -68,6 +94,11 @@ mod tests {
let sink = ErrorKindPartitionDoneSinkWrapper::new(
MockPartitionDoneSink::new(),
HashSet::from([ErrorKind::ObjectStore, ErrorKind::OutOfMemory]),
+ create_test_scheduler(
+ TestCatalog::new().catalog(),
+ Arc::new(MockProvider::new(Time::MIN)),
+ None,
+ ),
);
assert_eq!(sink.to_string(), "kind([ObjectStore, OutOfMemory], mock)");
}
@@ -78,6 +109,11 @@ mod tests {
let sink = ErrorKindPartitionDoneSinkWrapper::new(
Arc::clone(&inner),
HashSet::from([ErrorKind::ObjectStore, ErrorKind::OutOfMemory]),
+ create_test_scheduler(
+ TestCatalog::new().catalog(),
+ Arc::new(MockProvider::new(Time::MIN)),
+ None,
+ ),
);
sink.record(
diff --git a/compactor/src/components/partition_done_sink/logging.rs b/compactor/src/components/partition_done_sink/logging.rs
index d4649567dc..a9fb53a327 100644
--- a/compactor/src/components/partition_done_sink/logging.rs
+++ b/compactor/src/components/partition_done_sink/logging.rs
@@ -1,13 +1,12 @@
use std::fmt::Display;
use async_trait::async_trait;
+use compactor_scheduler::PartitionDoneSink;
use data_types::PartitionId;
use observability_deps::tracing::{error, info};
use crate::error::{DynError, ErrorKindExt};
-use super::PartitionDoneSink;
-
#[derive(Debug)]
pub struct LoggingPartitionDoneSinkWrapper<T>
where
@@ -61,11 +60,10 @@ where
mod tests {
use std::{collections::HashMap, sync::Arc};
+ use compactor_scheduler::MockPartitionDoneSink;
use object_store::Error as ObjectStoreError;
use test_helpers::tracing::TracingCapture;
- use crate::components::partition_done_sink::mock::MockPartitionDoneSink;
-
use super::*;
#[test]
diff --git a/compactor/src/components/partition_done_sink/metrics.rs b/compactor/src/components/partition_done_sink/metrics.rs
index 1999e8e0f4..2a3c0163c4 100644
--- a/compactor/src/components/partition_done_sink/metrics.rs
+++ b/compactor/src/components/partition_done_sink/metrics.rs
@@ -1,13 +1,12 @@
use std::{collections::HashMap, fmt::Display};
use async_trait::async_trait;
+use compactor_scheduler::PartitionDoneSink;
use data_types::PartitionId;
use metric::{Registry, U64Counter};
use crate::error::{DynError, ErrorKind, ErrorKindExt};
-use super::PartitionDoneSink;
-
const METRIC_NAME_PARTITION_COMPLETE_COUNT: &str = "iox_compactor_partition_complete_count";
#[derive(Debug)]
@@ -83,11 +82,10 @@ where
mod tests {
use std::{collections::HashMap, sync::Arc};
+ use compactor_scheduler::MockPartitionDoneSink;
use metric::{assert_counter, Attributes};
use object_store::Error as ObjectStoreError;
- use crate::components::partition_done_sink::mock::MockPartitionDoneSink;
-
use super::*;
#[test]
diff --git a/compactor/src/components/partition_done_sink/mod.rs b/compactor/src/components/partition_done_sink/mod.rs
index 40d1772222..32f5aa961c 100644
--- a/compactor/src/components/partition_done_sink/mod.rs
+++ b/compactor/src/components/partition_done_sink/mod.rs
@@ -1,34 +1,4 @@
-use std::{
- fmt::{Debug, Display},
- sync::Arc,
-};
-
-use async_trait::async_trait;
-use data_types::PartitionId;
-
-use crate::error::DynError;
-
-pub mod catalog;
pub mod error_kind;
pub mod logging;
pub mod metrics;
-pub mod mock;
-
-/// Records "partition is done" status for given partition.
-#[async_trait]
-pub trait PartitionDoneSink: Debug + Display + Send + Sync {
- /// Record "partition is done" status for given partition.
- ///
- /// This method should retry.
- async fn record(&self, partition: PartitionId, res: Result<(), DynError>);
-}
-
-#[async_trait]
-impl<T> PartitionDoneSink for Arc<T>
-where
- T: PartitionDoneSink + ?Sized,
-{
- async fn record(&self, partition: PartitionId, res: Result<(), DynError>) {
- self.as_ref().record(partition, res).await
- }
-}
+pub mod outcome;
diff --git a/compactor/src/components/partition_done_sink/outcome.rs b/compactor/src/components/partition_done_sink/outcome.rs
new file mode 100644
index 0000000000..0f88f5c18c
--- /dev/null
+++ b/compactor/src/components/partition_done_sink/outcome.rs
@@ -0,0 +1,49 @@
+use std::{fmt::Display, sync::Arc};
+
+use async_trait::async_trait;
+use compactor_scheduler::{
+ CompactionJob, CompactionJobStatus, CompactionJobStatusResult, CompactionJobStatusVariant,
+ PartitionDoneSink, Scheduler, SkipReason,
+};
+use data_types::PartitionId;
+
+use crate::DynError;
+
+#[derive(Debug)]
+pub struct PartitionDoneSinkToScheduler {
+ scheduler: Arc<dyn Scheduler>,
+}
+
+impl PartitionDoneSinkToScheduler {
+ pub fn new(scheduler: Arc<dyn Scheduler>) -> Self {
+ Self { scheduler }
+ }
+}
+
+impl Display for PartitionDoneSinkToScheduler {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "PartitionDoneSinkToScheduler")
+ }
+}
+
+#[async_trait]
+impl PartitionDoneSink for PartitionDoneSinkToScheduler {
+ async fn record(&self, partition: PartitionId, res: Result<(), DynError>) {
+ let mut job_status = CompactionJobStatus {
+ job: CompactionJob::new(partition),
+ status: CompactionJobStatusVariant::Complete,
+ };
+ if let Err(e) = res {
+ job_status = CompactionJobStatus {
+ job: CompactionJob::new(partition),
+ status: CompactionJobStatusVariant::RequestToSkip(SkipReason::CompactionError(
+ e.to_string(),
+ )),
+ };
+ };
+ match self.scheduler.job_status(job_status).await {
+ Ok(CompactionJobStatusResult::Ack) => {}
+ _ => panic!("unexpected result from scheduler"),
+ }
+ }
+}
diff --git a/compactor/src/components/report.rs b/compactor/src/components/report.rs
index 0137447c45..d45b2664c1 100644
--- a/compactor/src/components/report.rs
+++ b/compactor/src/components/report.rs
@@ -32,7 +32,6 @@ pub fn log_config(config: &Config) {
min_num_l1_files_to_compact,
process_once,
parquet_files_sink_override,
- commit_wrapper,
simulate_without_object_store,
all_errors_are_fatal,
max_num_columns_per_table,
@@ -45,8 +44,6 @@ pub fn log_config(config: &Config) {
.map(|_| "Some")
.unwrap_or("None");
- let commit_wrapper = commit_wrapper.as_ref().map(|_| "Some").unwrap_or("None");
-
info!(
%catalog,
%scheduler_config,
@@ -69,7 +66,6 @@ pub fn log_config(config: &Config) {
process_once,
simulate_without_object_store,
%parquet_files_sink_override,
- %commit_wrapper,
all_errors_are_fatal,
max_num_columns_per_table,
max_num_files_per_plan,
diff --git a/compactor/src/config.rs b/compactor/src/config.rs
index 63a2cf0abe..389ce7e9ac 100644
--- a/compactor/src/config.rs
+++ b/compactor/src/config.rs
@@ -8,7 +8,7 @@ use iox_query::exec::Executor;
use iox_time::TimeProvider;
use parquet_file::storage::ParquetStorage;
-use crate::components::{commit::CommitWrapper, parquet_files_sink::ParquetFilesSink};
+use crate::components::parquet_files_sink::ParquetFilesSink;
/// Multiple from `max_desired_file_size_bytes` to compute the minimum value for
/// `max_compact_size_bytes`. Since `max_desired_file_size_bytes` is softly enforced, actual file
@@ -119,11 +119,6 @@ pub struct Config {
/// (used for testing)
pub parquet_files_sink_override: Option<Arc<dyn ParquetFilesSink>>,
- /// Optionally wrap the `Commit` instance
- ///
- /// This is mostly used for testing
- pub commit_wrapper: Option<Arc<dyn CommitWrapper>>,
-
/// Ensure that ALL errors (including object store errors) result in "skipped" partitions.
///
/// This is mostly useful for testing.
diff --git a/compactor/src/error.rs b/compactor/src/error.rs
index 3ae7be1b2b..a1d687165b 100644
--- a/compactor/src/error.rs
+++ b/compactor/src/error.rs
@@ -1,5 +1,6 @@
//! Error handling.
+use compactor_scheduler::ErrorKind as SchedulerErrorKind;
use datafusion::{arrow::error::ArrowError, error::DataFusionError, parquet::errors::ParquetError};
use object_store::Error as ObjectStoreError;
use std::{error::Error, fmt::Display, sync::Arc};
@@ -51,6 +52,17 @@ impl ErrorKind {
}
}
+impl From<ErrorKind> for SchedulerErrorKind {
+ fn from(e: ErrorKind) -> Self {
+ match e {
+ ErrorKind::ObjectStore => Self::ObjectStore,
+ ErrorKind::OutOfMemory => Self::OutOfMemory,
+ ErrorKind::Timeout => Self::Timeout,
+ ErrorKind::Unknown => Self::Unknown("".into()),
+ }
+ }
+}
+
impl Display for ErrorKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.name())
diff --git a/compactor/src/lib.rs b/compactor/src/lib.rs
index 711f6118b4..2e46324577 100644
--- a/compactor/src/lib.rs
+++ b/compactor/src/lib.rs
@@ -221,12 +221,8 @@ mod round_info;
// publically expose items needed for testing
pub use components::{
- commit::{Commit, CommitWrapper},
- df_planner::panic::PanicDataFusionPlanner,
- hardcoded::hardcoded_components,
- namespaces_source::mock::NamespaceWrapper,
- parquet_files_sink::ParquetFilesSink,
- Components,
+ df_planner::panic::PanicDataFusionPlanner, hardcoded::hardcoded_components,
+ namespaces_source::mock::NamespaceWrapper, parquet_files_sink::ParquetFilesSink, Components,
};
pub use driver::compact;
pub use error::DynError;
diff --git a/compactor_scheduler/Cargo.toml b/compactor_scheduler/Cargo.toml
index 426b5ab1ba..dae3b90adb 100644
--- a/compactor_scheduler/Cargo.toml
+++ b/compactor_scheduler/Cargo.toml
@@ -9,8 +9,11 @@ license.workspace = true
async-trait = "0.1.71"
backoff = { path = "../backoff" }
data_types = { path = "../data_types" }
+futures = "0.3"
iox_catalog = { path = "../iox_catalog" }
iox_time = { path = "../iox_time" }
+itertools = "0.11.0"
+metric = { path = "../metric" }
observability_deps = { path = "../observability_deps" }
sharder = { path = "../sharder" }
uuid = { version = "1", features = ["v4"] }
@@ -18,4 +21,5 @@ workspace-hack = { version = "0.1", path = "../workspace-hack" }
[dev-dependencies]
iox_tests = { path = "../iox_tests" }
+test_helpers = { path = "../test_helpers"}
tokio = { version = "1.29", features = ["macros", "net", "parking_lot", "rt-multi-thread", "signal", "sync", "time"] }
diff --git a/compactor/src/components/commit/logging.rs b/compactor_scheduler/src/commit/logging.rs
similarity index 97%
rename from compactor/src/components/commit/logging.rs
rename to compactor_scheduler/src/commit/logging.rs
index 0bd1789372..a2d97aed2a 100644
--- a/compactor/src/components/commit/logging.rs
+++ b/compactor_scheduler/src/commit/logging.rs
@@ -7,7 +7,7 @@ use observability_deps::tracing::info;
use super::Commit;
#[derive(Debug)]
-pub struct LoggingCommitWrapper<T>
+pub(crate) struct LoggingCommitWrapper<T>
where
T: Commit,
{
@@ -18,7 +18,7 @@ impl<T> LoggingCommitWrapper<T>
where
T: Commit,
{
- pub fn new(inner: T) -> Self {
+ pub(crate) fn new(inner: T) -> Self {
Self { inner }
}
}
@@ -83,7 +83,7 @@ mod tests {
use test_helpers::tracing::TracingCapture;
use super::*;
- use crate::components::commit::mock::{CommitHistoryEntry, MockCommit};
+ use crate::commit::mock::{CommitHistoryEntry, MockCommit};
use iox_tests::ParquetFileBuilder;
#[test]
diff --git a/compactor/src/components/commit/metrics.rs b/compactor_scheduler/src/commit/metrics.rs
similarity index 98%
rename from compactor/src/components/commit/metrics.rs
rename to compactor_scheduler/src/commit/metrics.rs
index 4a63cec426..37d141aa74 100644
--- a/compactor/src/components/commit/metrics.rs
+++ b/compactor_scheduler/src/commit/metrics.rs
@@ -102,7 +102,7 @@ impl Histogram {
}
#[derive(Debug)]
-pub struct MetricsCommitWrapper<T>
+pub(crate) struct MetricsCommitWrapper<T>
where
T: Commit,
{
@@ -124,7 +124,7 @@ impl<T> MetricsCommitWrapper<T>
where
T: Commit,
{
- pub fn new(inner: T, registry: &Registry) -> Self {
+ pub(crate) fn new(inner: T, registry: &Registry) -> Self {
Self {
file_bytes: Histogram::new(
registry,
@@ -307,7 +307,7 @@ mod tests {
use metric::{assert_histogram, Attributes};
- use crate::components::commit::mock::{CommitHistoryEntry, MockCommit};
+ use crate::commit::mock::{CommitHistoryEntry, MockCommit};
use iox_tests::ParquetFileBuilder;
use super::*;
diff --git a/compactor/src/components/commit/mock.rs b/compactor_scheduler/src/commit/mock.rs
similarity index 93%
rename from compactor/src/components/commit/mock.rs
rename to compactor_scheduler/src/commit/mock.rs
index 17a68c33b8..deee3592b3 100644
--- a/compactor/src/components/commit/mock.rs
+++ b/compactor_scheduler/src/commit/mock.rs
@@ -12,23 +12,23 @@ use data_types::{CompactionLevel, ParquetFile, ParquetFileId, ParquetFileParams,
use super::Commit;
#[derive(Debug, PartialEq, Eq, Clone)]
-pub struct CommitHistoryEntry {
- pub partition_id: PartitionId,
- pub delete: Vec<ParquetFile>,
- pub upgrade: Vec<ParquetFile>,
- pub created: Vec<ParquetFile>,
- pub target_level: CompactionLevel,
+pub(crate) struct CommitHistoryEntry {
+ pub(crate) partition_id: PartitionId,
+ pub(crate) delete: Vec<ParquetFile>,
+ pub(crate) upgrade: Vec<ParquetFile>,
+ pub(crate) created: Vec<ParquetFile>,
+ pub(crate) target_level: CompactionLevel,
}
-#[derive(Debug)]
-pub struct MockCommit {
+#[derive(Debug, Default)]
+pub(crate) struct MockCommit {
history: Mutex<Vec<CommitHistoryEntry>>,
id_counter: AtomicI64,
}
impl MockCommit {
#[allow(dead_code)] // not used anywhere
- pub fn new() -> Self {
+ pub(crate) fn new() -> Self {
Self {
history: Default::default(),
id_counter: AtomicI64::new(1000),
@@ -36,7 +36,7 @@ impl MockCommit {
}
#[allow(dead_code)] // not used anywhere
- pub fn history(&self) -> Vec<CommitHistoryEntry> {
+ pub(crate) fn history(&self) -> Vec<CommitHistoryEntry> {
self.history.lock().expect("not poisoned").clone()
}
}
diff --git a/compactor/src/components/commit/mod.rs b/compactor_scheduler/src/commit/mod.rs
similarity index 95%
rename from compactor/src/components/commit/mod.rs
rename to compactor_scheduler/src/commit/mod.rs
index de777ccefd..c1d90a7d1e 100644
--- a/compactor/src/components/commit/mod.rs
+++ b/compactor_scheduler/src/commit/mod.rs
@@ -6,10 +6,9 @@ use std::{
use async_trait::async_trait;
use data_types::{CompactionLevel, ParquetFile, ParquetFileId, ParquetFileParams, PartitionId};
-pub mod catalog;
-pub mod logging;
-pub mod metrics;
-pub mod mock;
+pub(crate) mod logging;
+pub(crate) mod metrics;
+pub(crate) mod mock;
/// Ensures that the file change (i.e. deletion and creation) are committed to the catalog.
#[async_trait]
diff --git a/compactor_scheduler/src/error.rs b/compactor_scheduler/src/error.rs
new file mode 100644
index 0000000000..7426de087d
--- /dev/null
+++ b/compactor_scheduler/src/error.rs
@@ -0,0 +1,37 @@
+//! Error classification.
+
+/// What kind of error did we occur during compaction?
+#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub enum ErrorKind {
+ /// Could not access the object store.
+ ObjectStore,
+
+ /// We ran out of memory (OOM).
+ OutOfMemory,
+
+ /// Partition took too long.
+ Timeout,
+
+ /// Unknown/unexpected error.
+ ///
+ /// This will likely mark the affected partition as "skipped" and the compactor will no longer touch it.
+ Unknown(String),
+}
+
+impl ErrorKind {
+ /// Return static name.
+ pub fn name(&self) -> &'static str {
+ match self {
+ Self::ObjectStore => "object_store",
+ Self::OutOfMemory => "out_of_memory",
+ Self::Timeout => "timeout",
+ Self::Unknown(_) => "unknown",
+ }
+ }
+}
+
+impl std::fmt::Display for ErrorKind {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "{}", self.name())
+ }
+}
diff --git a/compactor_scheduler/src/lib.rs b/compactor_scheduler/src/lib.rs
index 7d7d35e7d4..1033364fe5 100644
--- a/compactor_scheduler/src/lib.rs
+++ b/compactor_scheduler/src/lib.rs
@@ -26,16 +26,26 @@ use iox_time::TimeProvider;
// Workaround for "unused crate" lint false positives.
use workspace_hack as _;
+pub(crate) mod commit;
+pub(crate) use commit::mock::MockCommit;
+pub use commit::{Commit, CommitWrapper};
+
+mod error;
+pub use error::ErrorKind;
+
mod local_scheduler;
pub(crate) use local_scheduler::{id_only_partition_filter::IdOnlyPartitionFilter, LocalScheduler};
-// configurations used externally during scheduler setup
pub use local_scheduler::{
- partitions_source_config::PartitionsSourceConfig, shard_config::ShardConfig,
+ partition_done_sink::{mock::MockPartitionDoneSink, PartitionDoneSink},
+ partitions_source_config::PartitionsSourceConfig,
+ shard_config::ShardConfig,
LocalSchedulerConfig,
};
+
// partitions_source trait
mod partitions_source;
pub use partitions_source::*;
+
// scheduler trait and associated types
mod scheduler;
pub use scheduler::*;
@@ -49,6 +59,8 @@ pub fn create_scheduler(
config: SchedulerConfig,
catalog: Arc<dyn Catalog>,
time_provider: Arc<dyn TimeProvider>,
+ metrics: Arc<metric::Registry>,
+ shadow_mode: bool,
) -> Arc<dyn Scheduler> {
match config {
SchedulerConfig::Local(scheduler_config) => {
@@ -57,6 +69,8 @@ pub fn create_scheduler(
BackoffConfig::default(),
catalog,
time_provider,
+ metrics,
+ shadow_mode,
);
Arc::new(scheduler)
}
@@ -75,13 +89,20 @@ pub fn create_test_scheduler(
let scheduler_config = match mocked_partition_ids {
None => SchedulerConfig::default(),
Some(partition_ids) => SchedulerConfig::Local(LocalSchedulerConfig {
+ commit_wrapper: None,
partitions_source_config: PartitionsSourceConfig::Fixed(
partition_ids.into_iter().collect::<HashSet<PartitionId>>(),
),
shard_config: None,
}),
};
- create_scheduler(scheduler_config, catalog, time_provider)
+ create_scheduler(
+ scheduler_config,
+ catalog,
+ time_provider,
+ Arc::new(metric::Registry::default()),
+ false,
+ )
}
#[cfg(test)]
@@ -111,7 +132,7 @@ mod tests {
}
#[tokio::test]
- async fn test_test_scheduler_with_mocked_parition_ids() {
+ async fn test_test_scheduler_with_mocked_partition_ids() {
let partitions = vec![PartitionId::new(0), PartitionId::new(1234242)];
let scheduler = create_test_scheduler(
diff --git a/compactor_scheduler/src/local_scheduler.rs b/compactor_scheduler/src/local_scheduler.rs
index 2aa917ef1f..f22d836100 100644
--- a/compactor_scheduler/src/local_scheduler.rs
+++ b/compactor_scheduler/src/local_scheduler.rs
@@ -1,26 +1,36 @@
//! Internals used by [`LocalScheduler`].
+pub(crate) mod catalog_commit;
+pub(crate) mod combos;
pub(crate) mod id_only_partition_filter;
+pub(crate) mod partition_done_sink;
pub(crate) mod partitions_source;
pub(crate) mod partitions_source_config;
pub(crate) mod shard_config;
-use std::sync::Arc;
+use std::{sync::Arc, time::Duration};
use async_trait::async_trait;
use backoff::BackoffConfig;
use iox_catalog::interface::Catalog;
use iox_time::TimeProvider;
-use observability_deps::tracing::info;
+use observability_deps::tracing::{info, warn};
use crate::{
- CompactionJob, MockPartitionsSource, PartitionsSource, PartitionsSourceConfig, Scheduler,
- ShardConfig,
+ commit::{logging::LoggingCommitWrapper, metrics::MetricsCommitWrapper},
+ Commit, CommitUpdate, CommitWrapper, CompactionJob, CompactionJobStatus,
+ CompactionJobStatusResult, CompactionJobStatusVariant, MockCommit, MockPartitionsSource,
+ PartitionsSource, PartitionsSourceConfig, Scheduler, ShardConfig, SkipReason,
};
use self::{
+ catalog_commit::CatalogCommit,
+ combos::{throttle_partition::throttle_partition, unique_partitions::unique_partitions},
id_only_partition_filter::{
and::AndIdOnlyPartitionFilter, shard::ShardPartitionFilter, IdOnlyPartitionFilter,
},
+ partition_done_sink::{
+ catalog::CatalogPartitionDoneSink, mock::MockPartitionDoneSink, PartitionDoneSink,
+ },
partitions_source::{
catalog_all::CatalogAllPartitionsSource,
catalog_to_compact::CatalogToCompactPartitionsSource,
@@ -31,6 +41,10 @@ use self::{
/// Configuration specific to the local scheduler.
#[derive(Debug, Default, Clone)]
pub struct LocalSchedulerConfig {
+ /// Optionally wrap the `Commit` instance
+ ///
+ /// This is mostly used for testing
+ pub commit_wrapper: Option<Arc<dyn CommitWrapper>>,
/// The partitions source config used by the local sceduler.
pub partitions_source_config: PartitionsSourceConfig,
/// The shard config used by the local sceduler.
@@ -40,8 +54,14 @@ pub struct LocalSchedulerConfig {
/// Implementation of the scheduler for local (per compactor) scheduling.
#[derive(Debug)]
pub(crate) struct LocalScheduler {
+ /// Commits changes (i.e. deletion and creation) to the catalog
+ pub(crate) commit: Arc<dyn Commit>,
/// The partitions source to use for scheduling.
partitions_source: Arc<dyn PartitionsSource>,
+ /// The actions to take when a partition is done.
+ ///
+ /// Includes partition (PartitionId) tracking of uniqueness and throttling.
+ partition_done_sink: Arc<dyn PartitionDoneSink>,
/// The shard config used for generating the PartitionsSource.
shard_config: Option<ShardConfig>,
}
@@ -53,7 +73,47 @@ impl LocalScheduler {
backoff_config: BackoffConfig,
catalog: Arc<dyn Catalog>,
time_provider: Arc<dyn TimeProvider>,
+ metrics: Arc<metric::Registry>,
+ shadow_mode: bool,
) -> Self {
+ let commit = Self::build_commit(
+ config.clone(),
+ backoff_config.clone(),
+ Arc::clone(&catalog),
+ metrics,
+ shadow_mode,
+ );
+
+ let partitions_source = Self::build_partitions_source(
+ config.clone(),
+ backoff_config.clone(),
+ Arc::clone(&catalog),
+ Arc::clone(&time_provider),
+ );
+
+ let (partitions_source, commit, partition_done_sink) = Self::build_partition_done_sink(
+ partitions_source,
+ commit,
+ backoff_config,
+ catalog,
+ time_provider,
+ shadow_mode,
+ );
+
+ Self {
+ commit,
+ partitions_source,
+ partition_done_sink,
+ shard_config: config.shard_config,
+ }
+ }
+
+ fn build_partitions_source(
+ config: LocalSchedulerConfig,
+ backoff_config: BackoffConfig,
+ catalog: Arc<dyn Catalog>,
+ time_provider: Arc<dyn TimeProvider>,
+ ) -> Arc<dyn PartitionsSource> {
let shard_config = config.shard_config;
let partitions_source: Arc<dyn PartitionsSource> = match &config.partitions_source_config {
PartitionsSourceConfig::CatalogRecentWrites { threshold } => {
@@ -86,16 +146,75 @@ impl LocalScheduler {
shard_config.shard_id,
)));
}
- let partitions_source: Arc<dyn PartitionsSource> =
- Arc::new(FilterPartitionsSourceWrapper::new(
- AndIdOnlyPartitionFilter::new(id_only_partition_filters),
- partitions_source,
- ));
+ Arc::new(FilterPartitionsSourceWrapper::new(
+ AndIdOnlyPartitionFilter::new(id_only_partition_filters),
+ partitions_source,
+ ))
+ }
- Self {
+ fn build_partition_done_sink(
+ partitions_source: Arc<dyn PartitionsSource>,
+ commit: Arc<dyn Commit>,
+ backoff_config: BackoffConfig,
+ catalog: Arc<dyn Catalog>,
+ time_provider: Arc<dyn TimeProvider>,
+ shadow_mode: bool,
+ ) -> (
+ Arc<dyn PartitionsSource>,
+ Arc<dyn Commit>,
+ Arc<dyn PartitionDoneSink>,
+ ) {
+ let partition_done_sink: Arc<dyn PartitionDoneSink> = if shadow_mode {
+ Arc::new(MockPartitionDoneSink::new())
+ } else {
+ Arc::new(CatalogPartitionDoneSink::new(
+ backoff_config,
+ Arc::clone(&catalog),
+ ))
+ };
+
+ let (partitions_source, partition_done_sink) =
+ unique_partitions(partitions_source, partition_done_sink, 1);
+
+ let (partitions_source, commit, partition_done_sink) = throttle_partition(
partitions_source,
- shard_config,
- }
+ commit,
+ partition_done_sink,
+ Arc::clone(&time_provider),
+ Duration::from_secs(60),
+ 1,
+ );
+
+ (
+ Arc::new(partitions_source),
+ Arc::new(commit),
+ Arc::new(partition_done_sink),
+ )
+ }
+
+ fn build_commit(
+ config: LocalSchedulerConfig,
+ backoff_config: BackoffConfig,
+ catalog: Arc<dyn Catalog>,
+ metrics_registry: Arc<metric::Registry>,
+ shadow_mode: bool,
+ ) -> Arc<dyn Commit> {
+ let commit: Arc<dyn Commit> = if shadow_mode {
+ Arc::new(MockCommit::new())
+ } else {
+ Arc::new(CatalogCommit::new(backoff_config, Arc::clone(&catalog)))
+ };
+
+ let commit = if let Some(commit_wrapper) = &config.commit_wrapper {
+ commit_wrapper.wrap(commit)
+ } else {
+ commit
+ };
+
+ Arc::new(LoggingCommitWrapper::new(MetricsCommitWrapper::new(
+ commit,
+ &metrics_registry,
+ )))
}
}
@@ -109,6 +228,48 @@ impl Scheduler for LocalScheduler {
.map(CompactionJob::new)
.collect()
}
+
+ async fn job_status(
+ &self,
+ job_status: CompactionJobStatus,
+ ) -> Result<CompactionJobStatusResult, Box<dyn std::error::Error>> {
+ match job_status.status {
+ CompactionJobStatusVariant::Update(commit_update) => {
+ let CommitUpdate {
+ partition_id,
+ delete,
+ upgrade,
+ target_level,
+ create,
+ } = commit_update;
+
+ let result = self
+ .commit
+ .commit(partition_id, &delete, &upgrade, &create, target_level)
+ .await;
+
+ // verify create commit counts
+ assert_eq!(result.len(), create.len());
+
+ Ok(CompactionJobStatusResult::UpdatedParquetFiles(result))
+ }
+ CompactionJobStatusVariant::RequestToSkip(SkipReason::CompactionError(msg)) => {
+ self.partition_done_sink
+ .record(job_status.job.partition_id, Err(msg.into()))
+ .await;
+
+ Ok(CompactionJobStatusResult::Ack)
+ }
+ CompactionJobStatusVariant::Error(error_kind) => {
+ warn!("Error processing job: {:?}: {}", job_status.job, error_kind);
+ Ok(CompactionJobStatusResult::Ack)
+ }
+ CompactionJobStatusVariant::Complete => {
+ // TODO: once uuid is handled properly, we can track the job completion
+ Ok(CompactionJobStatusResult::Ack)
+ }
+ }
+ }
}
impl std::fmt::Display for LocalScheduler {
@@ -122,7 +283,10 @@ impl std::fmt::Display for LocalScheduler {
#[cfg(test)]
mod tests {
- use iox_tests::TestCatalog;
+ use std::collections::HashSet;
+
+ use data_types::{ColumnType, PartitionId};
+ use iox_tests::{ParquetFileBuilder, TestCatalog, TestParquetFile, TestParquetFileBuilder};
use iox_time::{MockProvider, Time};
use super::*;
@@ -134,6 +298,8 @@ mod tests {
BackoffConfig::default(),
TestCatalog::new().catalog(),
Arc::new(MockProvider::new(Time::MIN)),
+ Arc::new(metric::Registry::default()),
+ false,
);
assert_eq!(scheduler.to_string(), "local_compaction_scheduler",);
@@ -147,6 +313,7 @@ mod tests {
});
let config = LocalSchedulerConfig {
+ commit_wrapper: None,
partitions_source_config: PartitionsSourceConfig::default(),
shard_config,
};
@@ -156,6 +323,8 @@ mod tests {
BackoffConfig::default(),
TestCatalog::new().catalog(),
Arc::new(MockProvider::new(Time::MIN)),
+ Arc::new(metric::Registry::default()),
+ false,
);
assert_eq!(
@@ -163,4 +332,245 @@ mod tests {
"local_compaction_scheduler(shard_cfg(n_shards=2,shard_id=1))",
);
}
+
+ async fn create_scheduler_with_partitions() -> (LocalScheduler, TestParquetFile, TestParquetFile)
+ {
+ let catalog = TestCatalog::new();
+ let ns = catalog.create_namespace_with_retention("ns", None).await;
+ let table = ns.create_table("table1").await;
+ table.create_column("time", ColumnType::Time).await;
+ table.create_column("load", ColumnType::F64).await;
+
+ let partition1 = table.create_partition("k").await;
+ let partition2 = table.create_partition("k").await;
+ let partition_ids = vec![partition1.partition.id, partition2.partition.id];
+
+ // two files on partition1, to be replaced by one compacted file
+ let file_builder = TestParquetFileBuilder::default().with_line_protocol("table1 load=1 11");
+ let file1_1 = partition1.create_parquet_file(file_builder.clone()).await;
+ let file1_2 = partition1.create_parquet_file(file_builder).await;
+
+ let config = LocalSchedulerConfig {
+ commit_wrapper: None,
+ partitions_source_config: PartitionsSourceConfig::Fixed(
+ partition_ids.into_iter().collect::<HashSet<PartitionId>>(),
+ ),
+ shard_config: None,
+ };
+
+ let scheduler = LocalScheduler::new(
+ config,
+ BackoffConfig::default(),
+ catalog.catalog(),
+ Arc::new(MockProvider::new(Time::MIN)),
+ Arc::new(metric::Registry::default()),
+ false,
+ );
+
+ (scheduler, file1_1, file1_2)
+ }
+
+ #[tokio::test]
+ #[should_panic]
+ async fn test_status_update_none_should_panic() {
+ test_helpers::maybe_start_logging();
+
+ let (scheduler, _, _) = create_scheduler_with_partitions().await;
+ let jobs = scheduler.get_jobs().await;
+
+ for job in jobs {
+ let commit_update = CommitUpdate {
+ partition_id: job.partition_id,
+ delete: vec![],
+ upgrade: vec![],
+ target_level: data_types::CompactionLevel::Final,
+ create: vec![],
+ };
+
+ let _ = scheduler
+ .job_status(CompactionJobStatus {
+ job,
+ status: CompactionJobStatusVariant::Update(commit_update),
+ })
+ .await;
+ }
+ }
+
+ #[tokio::test]
+ async fn test_status_update_replacement() {
+ test_helpers::maybe_start_logging();
+
+ let (scheduler, existing_1, existing_2) = create_scheduler_with_partitions().await;
+ let jobs = scheduler.get_jobs().await;
+ let job = jobs
+ .into_iter()
+ .find(|job| job.partition_id == existing_1.partition.partition.id)
+ .unwrap();
+
+ let created = ParquetFileBuilder::new(1002)
+ .with_partition(job.partition_id.get())
+ .build();
+
+ let commit_update = CommitUpdate {
+ partition_id: job.partition_id,
+ delete: vec![existing_1.into(), existing_2.into()],
+ upgrade: vec![],
+ target_level: data_types::CompactionLevel::Final,
+ create: vec![created.into()],
+ };
+
+ assert!(scheduler
+ .job_status(CompactionJobStatus {
+ job,
+ status: CompactionJobStatusVariant::Update(commit_update),
+ })
+ .await
+ .is_ok());
+ }
+
+ #[tokio::test]
+ #[should_panic]
+ async fn test_status_update_replacement_args_incomplete() {
+ test_helpers::maybe_start_logging();
+
+ let (scheduler, _, _) = create_scheduler_with_partitions().await;
+ let jobs = scheduler.get_jobs().await;
+
+ for job in jobs {
+ let created_1 = ParquetFileBuilder::new(1002)
+ .with_partition(job.partition_id.get())
+ .build();
+
+ let commit_update = CommitUpdate {
+ partition_id: job.partition_id,
+ delete: vec![],
+ upgrade: vec![],
+ target_level: data_types::CompactionLevel::Final,
+ create: vec![created_1.into()],
+ };
+
+ let _ = scheduler
+ .job_status(CompactionJobStatus {
+ job,
+ status: CompactionJobStatusVariant::Update(commit_update),
+ })
+ .await;
+ }
+ }
+
+ #[tokio::test]
+ async fn test_status_update_upgrade() {
+ test_helpers::maybe_start_logging();
+
+ let (scheduler, existing_1, existing_2) = create_scheduler_with_partitions().await;
+ let jobs = scheduler.get_jobs().await;
+ let job = jobs
+ .into_iter()
+ .find(|job| job.partition_id == existing_1.partition.partition.id)
+ .unwrap();
+
+ let commit_update = CommitUpdate {
+ partition_id: job.partition_id,
+ delete: vec![],
+ upgrade: vec![existing_1.into(), existing_2.into()],
+ target_level: data_types::CompactionLevel::Final,
+ create: vec![],
+ };
+
+ assert!(scheduler
+ .job_status(CompactionJobStatus {
+ job,
+ status: CompactionJobStatusVariant::Update(commit_update),
+ })
+ .await
+ .is_ok());
+ }
+
+ #[tokio::test]
+ async fn test_status_update_can_replace_and_upgrade_at_once() {
+ test_helpers::maybe_start_logging();
+
+ let (scheduler, existing_1, existing_2) = create_scheduler_with_partitions().await;
+ let jobs = scheduler.get_jobs().await;
+ let job = jobs
+ .into_iter()
+ .find(|job| job.partition_id == existing_1.partition.partition.id)
+ .unwrap();
+
+ let created = ParquetFileBuilder::new(1002)
+ .with_partition(job.partition_id.get())
+ .build();
+
+ let commit_update = CommitUpdate {
+ partition_id: job.partition_id,
+ delete: vec![existing_1.into()],
+ upgrade: vec![existing_2.into()],
+ target_level: data_types::CompactionLevel::Final,
+ create: vec![created.into()],
+ };
+
+ assert!(scheduler
+ .job_status(CompactionJobStatus {
+ job,
+ status: CompactionJobStatusVariant::Update(commit_update),
+ })
+ .await
+ .is_ok());
+ }
+
+ #[tokio::test]
+ async fn test_status_skip() {
+ test_helpers::maybe_start_logging();
+
+ let (scheduler, _, _) = create_scheduler_with_partitions().await;
+ let jobs = scheduler.get_jobs().await;
+
+ for job in jobs {
+ assert!(scheduler
+ .job_status(CompactionJobStatus {
+ job,
+ status: CompactionJobStatusVariant::RequestToSkip(SkipReason::CompactionError(
+ "some error".into()
+ )),
+ })
+ .await
+ .is_ok());
+ }
+ }
+
+ #[tokio::test]
+ async fn test_status_error() {
+ test_helpers::maybe_start_logging();
+
+ let (scheduler, _, _) = create_scheduler_with_partitions().await;
+ let jobs = scheduler.get_jobs().await;
+
+ for job in jobs {
+ assert!(scheduler
+ .job_status(CompactionJobStatus {
+ job,
+ status: CompactionJobStatusVariant::Error(crate::ErrorKind::OutOfMemory),
+ })
+ .await
+ .is_ok());
+ }
+ }
+
+ #[tokio::test]
+ async fn test_status_complete() {
+ test_helpers::maybe_start_logging();
+
+ let (scheduler, _, _) = create_scheduler_with_partitions().await;
+ let jobs = scheduler.get_jobs().await;
+
+ for job in jobs {
+ assert!(scheduler
+ .job_status(CompactionJobStatus {
+ job,
+ status: CompactionJobStatusVariant::Complete,
+ })
+ .await
+ .is_ok());
+ }
+ }
}
diff --git a/compactor/src/components/commit/catalog.rs b/compactor_scheduler/src/local_scheduler/catalog_commit.rs
similarity index 91%
rename from compactor/src/components/commit/catalog.rs
rename to compactor_scheduler/src/local_scheduler/catalog_commit.rs
index 97d9eb1415..d71474610d 100644
--- a/compactor/src/components/commit/catalog.rs
+++ b/compactor_scheduler/src/local_scheduler/catalog_commit.rs
@@ -5,16 +5,16 @@ use backoff::{Backoff, BackoffConfig};
use data_types::{CompactionLevel, ParquetFile, ParquetFileId, ParquetFileParams, PartitionId};
use iox_catalog::interface::Catalog;
-use super::Commit;
+use crate::Commit;
#[derive(Debug)]
-pub struct CatalogCommit {
+pub(crate) struct CatalogCommit {
backoff_config: BackoffConfig,
catalog: Arc<dyn Catalog>,
}
impl CatalogCommit {
- pub fn new(backoff_config: BackoffConfig, catalog: Arc<dyn Catalog>) -> Self {
+ pub(crate) fn new(backoff_config: BackoffConfig, catalog: Arc<dyn Catalog>) -> Self {
Self {
backoff_config,
catalog,
diff --git a/compactor/src/components/combos/mod.rs b/compactor_scheduler/src/local_scheduler/combos/mod.rs
similarity index 59%
rename from compactor/src/components/combos/mod.rs
rename to compactor_scheduler/src/local_scheduler/combos/mod.rs
index efaf6439ec..b52a36ef14 100644
--- a/compactor/src/components/combos/mod.rs
+++ b/compactor_scheduler/src/local_scheduler/combos/mod.rs
@@ -1,7 +1,7 @@
//! Combinations of multiple components that together can achieve one goal.
-pub mod throttle_partition;
-pub mod unique_partitions;
+pub(crate) mod throttle_partition;
+pub(crate) mod unique_partitions;
#[cfg(test)]
mod tests;
diff --git a/compactor/src/components/combos/tests.rs b/compactor_scheduler/src/local_scheduler/combos/tests.rs
similarity index 84%
rename from compactor/src/components/combos/tests.rs
rename to compactor_scheduler/src/local_scheduler/combos/tests.rs
index bf5d3cafb1..4c52070997 100644
--- a/compactor/src/components/combos/tests.rs
+++ b/compactor_scheduler/src/local_scheduler/combos/tests.rs
@@ -1,15 +1,15 @@
use std::{sync::Arc, time::Duration};
-use compactor_scheduler::{MockPartitionsSource, PartitionsSource};
use data_types::{CompactionLevel, PartitionId};
use iox_time::{MockProvider, Time};
-use crate::components::{
- combos::{throttle_partition::throttle_partition, unique_partitions::unique_partitions},
- commit::{mock::MockCommit, Commit},
- partition_done_sink::{mock::MockPartitionDoneSink, PartitionDoneSink},
+use crate::{
+ Commit, MockCommit, MockPartitionDoneSink, MockPartitionsSource, PartitionDoneSink,
+ PartitionsSource,
};
+use super::{throttle_partition::throttle_partition, unique_partitions::unique_partitions};
+
#[tokio::test]
async fn test_unique_and_throttle() {
let inner_source = Arc::new(MockPartitionsSource::new(vec![
diff --git a/compactor/src/components/combos/throttle_partition.rs b/compactor_scheduler/src/local_scheduler/combos/throttle_partition.rs
similarity index 96%
rename from compactor/src/components/combos/throttle_partition.rs
rename to compactor_scheduler/src/local_scheduler/combos/throttle_partition.rs
index d108807a1e..624a1f5532 100644
--- a/compactor/src/components/combos/throttle_partition.rs
+++ b/compactor_scheduler/src/local_scheduler/combos/throttle_partition.rs
@@ -8,12 +8,11 @@ use std::{
};
use async_trait::async_trait;
-use compactor_scheduler::PartitionsSource;
use data_types::{CompactionLevel, ParquetFile, ParquetFileId, ParquetFileParams, PartitionId};
use futures::StreamExt;
use iox_time::{Time, TimeProvider};
-use crate::components::{commit::Commit, partition_done_sink::PartitionDoneSink};
+use crate::{Commit, PartitionDoneSink, PartitionsSource};
/// Ensures that partitions that do not receive any commits are throttled.
///
@@ -54,8 +53,8 @@ use crate::components::{commit::Commit, partition_done_sink::PartitionDoneSink};
/// concurrency of this bypass can be controlled via `bypass_concurrency`.
///
/// This setup relies on a fact that it does not process duplicate [`PartitionId`]. You may use
-/// [`unique_partitions`](crate::components::combos::unique_partitions::unique_partitions) to achieve that.
-pub fn throttle_partition<T1, T2, T3>(
+/// [`unique_partitions`](super::unique_partitions::unique_partitions) to achieve that.
+pub(crate) fn throttle_partition<T1, T2, T3>(
source: T1,
commit: T2,
sink: T3,
@@ -107,7 +106,7 @@ struct State {
type SharedState = Arc<Mutex<State>>;
#[derive(Debug)]
-pub struct ThrottlePartitionsSourceWrapper<T1, T2>
+pub(crate) struct ThrottlePartitionsSourceWrapper<T1, T2>
where
T1: PartitionsSource,
T2: PartitionDoneSink,
@@ -188,7 +187,7 @@ where
}
#[derive(Debug)]
-pub struct ThrottleCommitWrapper<T>
+pub(crate) struct ThrottleCommitWrapper<T>
where
T: Commit,
{
@@ -241,7 +240,7 @@ where
}
#[derive(Debug)]
-pub struct ThrottlePartitionDoneSinkWrapper<T>
+pub(crate) struct ThrottlePartitionDoneSinkWrapper<T>
where
T: PartitionDoneSink,
{
@@ -296,12 +295,10 @@ where
#[cfg(test)]
mod tests {
- use compactor_scheduler::MockPartitionsSource;
use iox_time::MockProvider;
- use crate::components::{
- commit::mock::{CommitHistoryEntry, MockCommit},
- partition_done_sink::mock::MockPartitionDoneSink,
+ use crate::{
+ commit::mock::CommitHistoryEntry, MockCommit, MockPartitionDoneSink, MockPartitionsSource,
};
use super::*;
diff --git a/compactor/src/components/combos/unique_partitions.rs b/compactor_scheduler/src/local_scheduler/combos/unique_partitions.rs
similarity index 95%
rename from compactor/src/components/combos/unique_partitions.rs
rename to compactor_scheduler/src/local_scheduler/combos/unique_partitions.rs
index 747fd3b512..3220645dfe 100644
--- a/compactor/src/components/combos/unique_partitions.rs
+++ b/compactor_scheduler/src/local_scheduler/combos/unique_partitions.rs
@@ -7,11 +7,10 @@ use std::{
};
use async_trait::async_trait;
-use compactor_scheduler::PartitionsSource;
use data_types::PartitionId;
use futures::StreamExt;
-use crate::components::partition_done_sink::PartitionDoneSink;
+use crate::{PartitionDoneSink, PartitionsSource};
/// Ensures that a unique set of partitions is flowing through the critical section of the compactor pipeline.
///
@@ -32,7 +31,7 @@ use crate::components::partition_done_sink::PartitionDoneSink;
///
/// | Step | Name | Type | Description |
/// | ---- | --------------------- | ----------------------------------------------------------- | ----------- |
-/// | 1 | **Actual source** | `inner_source`/`T1`/[`PartitionsSource`], wrapped | This is the actual source, e.g. a [schedule](crate::components::partitions_source::scheduled::ScheduledPartitionsSource) |
+/// | 1 | **Actual source** | `inner_source`/`T1`/[`PartitionsSource`], wrapped | This is the actual source, e.g. a [schedule](crate::PartitionsSource) |
/// | 2 | **Unique IDs source** | [`UniquePartionsSourceWrapper`], wraps `inner_source`/`T1` | Outputs that [`PartitionId`]s from the `inner_source` but filters out partitions that have not yet reached the uniqueness sink (step 4) |
/// | 3 | **Critical section** | -- | Here it is always ensured that a single [`PartitionId`] does NOT occur more than once. |
/// | 4 | **Unique IDs sink** | [`UniquePartitionDoneSinkWrapper`], wraps `inner_sink`/`T2` | Observes incoming IDs and removes them from the filter applied in step 2. |
@@ -41,7 +40,7 @@ use crate::components::partition_done_sink::PartitionDoneSink;
/// Note that partitions filtered out by [`UniquePartionsSourceWrapper`] will directly be forwarded to `inner_sink`. No
/// partition is ever lost. This means that `inner_source` and `inner_sink` can perform proper accounting. The
/// concurrency of this bypass can be controlled via `bypass_concurrency`.
-pub fn unique_partitions<T1, T2>(
+pub(crate) fn unique_partitions<T1, T2>(
inner_source: T1,
inner_sink: T2,
bypass_concurrency: usize,
@@ -71,7 +70,7 @@ where
type InFlight = Arc<Mutex<HashSet<PartitionId>>>;
#[derive(Debug)]
-pub struct UniquePartionsSourceWrapper<T1, T2>
+pub(crate) struct UniquePartionsSourceWrapper<T1, T2>
where
T1: PartitionsSource,
T2: PartitionDoneSink,
@@ -128,7 +127,7 @@ where
}
#[derive(Debug)]
-pub struct UniquePartitionDoneSinkWrapper<T>
+pub(crate) struct UniquePartitionDoneSinkWrapper<T>
where
T: PartitionDoneSink,
{
@@ -180,9 +179,7 @@ where
mod tests {
use std::collections::HashMap;
- use compactor_scheduler::MockPartitionsSource;
-
- use crate::components::partition_done_sink::mock::MockPartitionDoneSink;
+ use crate::{MockPartitionDoneSink, MockPartitionsSource};
use super::*;
diff --git a/compactor/src/components/partition_done_sink/catalog.rs b/compactor_scheduler/src/local_scheduler/partition_done_sink/catalog.rs
similarity index 88%
rename from compactor/src/components/partition_done_sink/catalog.rs
rename to compactor_scheduler/src/local_scheduler/partition_done_sink/catalog.rs
index ab384d0ac0..4172391ec7 100644
--- a/compactor/src/components/partition_done_sink/catalog.rs
+++ b/compactor_scheduler/src/local_scheduler/partition_done_sink/catalog.rs
@@ -5,18 +5,16 @@ use backoff::{Backoff, BackoffConfig};
use data_types::PartitionId;
use iox_catalog::interface::Catalog;
-use crate::error::DynError;
-
-use super::PartitionDoneSink;
+use super::{DynError, PartitionDoneSink};
#[derive(Debug)]
-pub struct CatalogPartitionDoneSink {
+pub(crate) struct CatalogPartitionDoneSink {
backoff_config: BackoffConfig,
catalog: Arc<dyn Catalog>,
}
impl CatalogPartitionDoneSink {
- pub fn new(backoff_config: BackoffConfig, catalog: Arc<dyn Catalog>) -> Self {
+ pub(crate) fn new(backoff_config: BackoffConfig, catalog: Arc<dyn Catalog>) -> Self {
Self {
backoff_config,
catalog,
diff --git a/compactor/src/components/partition_done_sink/mock.rs b/compactor_scheduler/src/local_scheduler/partition_done_sink/mock.rs
similarity index 92%
rename from compactor/src/components/partition_done_sink/mock.rs
rename to compactor_scheduler/src/local_scheduler/partition_done_sink/mock.rs
index b906a8b520..29673f579e 100644
--- a/compactor/src/components/partition_done_sink/mock.rs
+++ b/compactor_scheduler/src/local_scheduler/partition_done_sink/mock.rs
@@ -3,21 +3,21 @@ use std::{collections::HashMap, fmt::Display, sync::Mutex};
use async_trait::async_trait;
use data_types::PartitionId;
-use crate::error::DynError;
-
-use super::PartitionDoneSink;
+use super::{DynError, PartitionDoneSink};
+/// Mock for [`PartitionDoneSink`].
#[derive(Debug, Default)]
pub struct MockPartitionDoneSink {
last: Mutex<HashMap<PartitionId, Result<(), String>>>,
}
impl MockPartitionDoneSink {
+ /// Create new mock.
pub fn new() -> Self {
Self::default()
}
- #[allow(dead_code)] // not used anywhere
+ /// Get the last recorded results.
pub fn results(&self) -> HashMap<PartitionId, Result<(), String>> {
self.last.lock().expect("not poisoned").clone()
}
diff --git a/compactor_scheduler/src/local_scheduler/partition_done_sink/mod.rs b/compactor_scheduler/src/local_scheduler/partition_done_sink/mod.rs
new file mode 100644
index 0000000000..2bc87c4a0f
--- /dev/null
+++ b/compactor_scheduler/src/local_scheduler/partition_done_sink/mod.rs
@@ -0,0 +1,32 @@
+pub(crate) mod catalog;
+pub(crate) mod mock;
+
+use std::{
+ fmt::{Debug, Display},
+ sync::Arc,
+};
+
+use async_trait::async_trait;
+use data_types::PartitionId;
+
+/// Dynamic error type that is used throughout the stack.
+pub(crate) type DynError = Box<dyn std::error::Error + Send + Sync>;
+
+/// Records "partition is done" status for given partition.
+#[async_trait]
+pub trait PartitionDoneSink: Debug + Display + Send + Sync {
+ /// Record "partition is done" status for given partition.
+ ///
+ /// This method should retry.
+ async fn record(&self, partition: PartitionId, res: Result<(), DynError>);
+}
+
+#[async_trait]
+impl<T> PartitionDoneSink for Arc<T>
+where
+ T: PartitionDoneSink + ?Sized,
+{
+ async fn record(&self, partition: PartitionId, res: Result<(), DynError>) {
+ self.as_ref().record(partition, res).await
+ }
+}
diff --git a/compactor_scheduler/src/scheduler.rs b/compactor_scheduler/src/scheduler.rs
index 13f5b8355d..eaaa17b881 100644
--- a/compactor_scheduler/src/scheduler.rs
+++ b/compactor_scheduler/src/scheduler.rs
@@ -1,10 +1,13 @@
-use std::fmt::{Debug, Display};
+use std::{
+ fmt::{Debug, Display},
+ sync::Arc,
+};
use async_trait::async_trait;
-use data_types::PartitionId;
+use data_types::{CompactionLevel, ParquetFile, ParquetFileId, ParquetFileParams, PartitionId};
use uuid::Uuid;
-use crate::LocalSchedulerConfig;
+use crate::{CommitWrapper, ErrorKind, LocalSchedulerConfig, PartitionsSourceConfig};
/// Scheduler configuration.
#[derive(Debug, Clone)]
@@ -13,6 +16,19 @@ pub enum SchedulerConfig {
Local(LocalSchedulerConfig),
}
+impl SchedulerConfig {
+ /// Create new [`LocalScheduler`](crate::LocalScheduler) config with a [`CommitWrapper`].
+ ///
+ /// This is useful for testing.
+ pub fn new_local_with_wrapper(commit_wrapper: Arc<dyn CommitWrapper>) -> Self {
+ Self::Local(LocalSchedulerConfig {
+ shard_config: None,
+ partitions_source_config: PartitionsSourceConfig::default(),
+ commit_wrapper: Some(commit_wrapper),
+ })
+ }
+}
+
impl Default for SchedulerConfig {
fn default() -> Self {
Self::Local(LocalSchedulerConfig::default())
@@ -23,11 +39,21 @@ impl std::fmt::Display for SchedulerConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
SchedulerConfig::Local(LocalSchedulerConfig {
+ commit_wrapper,
shard_config,
partitions_source_config: _,
- }) => match &shard_config {
- None => write!(f, "local_compaction_scheduler"),
- Some(shard_config) => write!(f, "local_compaction_scheduler({shard_config})",),
+ }) => match (&shard_config, commit_wrapper) {
+ (None, None) => write!(f, "local_compaction_scheduler_cfg"),
+ (Some(shard_config), None) => {
+ write!(f, "local_compaction_scheduler_cfg({shard_config})",)
+ }
+ (Some(shard_config), Some(_)) => write!(
+ f,
+ "local_compaction_scheduler_cfg({shard_config},commit_wrapper=Some)",
+ ),
+ (None, Some(_)) => {
+ write!(f, "local_compaction_scheduler_cfg(commit_wrapper=Some)",)
+ }
},
}
}
@@ -54,9 +80,115 @@ impl CompactionJob {
}
}
+/// Commit update for a given partition.
+#[derive(Debug)]
+pub struct CommitUpdate {
+ /// Partition to be updated.
+ pub(crate) partition_id: PartitionId,
+ /// Files to be deleted.
+ pub(crate) delete: Vec<ParquetFile>,
+ /// Files to be upgraded.
+ pub(crate) upgrade: Vec<ParquetFile>,
+ /// Target level for upgraded files.
+ pub(crate) target_level: CompactionLevel,
+ /// Files to be created.
+ pub(crate) create: Vec<ParquetFileParams>,
+}
+
+impl CommitUpdate {
+ /// Create new commit update.
+ pub fn new(
+ partition_id: PartitionId,
+ delete: Vec<ParquetFile>,
+ upgrade: Vec<ParquetFile>,
+ create: Vec<ParquetFileParams>,
+ target_level: CompactionLevel,
+ ) -> Self {
+ Self {
+ partition_id,
+ delete,
+ upgrade,
+ target_level,
+ create,
+ }
+ }
+}
+
+/// Reason for skipping a partition.
+#[derive(Debug)]
+pub enum SkipReason {
+ /// Partition is not compactible, due to an encountered error.
+ CompactionError(String),
+}
+
+/// Status.
+#[derive(Debug)]
+pub enum CompactionJobStatusVariant {
+ /// Updates associated with ongoing compaction job.
+ Update(CommitUpdate),
+ /// Request to skip partition.
+ RequestToSkip(SkipReason),
+ /// Compaction job is complete.
+ Complete,
+ /// Compaction job has failed.
+ Error(ErrorKind),
+}
+
+/// Status ([`CompactionJobStatusVariant`]) associated with a [`CompactionJob`].
+#[derive(Debug)]
+pub struct CompactionJobStatus {
+ /// Job.
+ pub job: CompactionJob,
+ /// Status.
+ pub status: CompactionJobStatusVariant,
+}
+
+/// Status of a compaction job.
+#[derive(Debug)]
+pub enum CompactionJobStatusResult {
+ /// Ack only.
+ Ack,
+ /// Updates which were processed.
+ UpdatedParquetFiles(Vec<ParquetFileId>),
+}
+
/// Core trait used for all schedulers.
#[async_trait]
pub trait Scheduler: Send + Sync + Debug + Display {
/// Get partitions to be compacted.
async fn get_jobs(&self) -> Vec<CompactionJob>;
+
+ /// Update job status.
+ async fn job_status(
+ &self,
+ job_status: CompactionJobStatus,
+ ) -> Result<CompactionJobStatusResult, Box<dyn std::error::Error>>;
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use crate::Commit;
+
+ use super::*;
+
+ #[test]
+ fn test_cfg_display_new_local_with_wrapper() {
+ #[derive(Debug)]
+ struct MockCommitWrapper;
+
+ impl CommitWrapper for MockCommitWrapper {
+ fn wrap(&self, commit: Arc<dyn Commit>) -> Arc<dyn Commit> {
+ commit
+ }
+ }
+
+ let config = SchedulerConfig::new_local_with_wrapper(Arc::new(MockCommitWrapper));
+
+ assert_eq!(
+ config.to_string(),
+ "local_compaction_scheduler_cfg(commit_wrapper=Some)"
+ );
+ }
}
diff --git a/compactor_test_utils/src/commit_wrapper.rs b/compactor_test_utils/src/commit_wrapper.rs
index 388c62fb5c..9fd8b3d53c 100644
--- a/compactor_test_utils/src/commit_wrapper.rs
+++ b/compactor_test_utils/src/commit_wrapper.rs
@@ -1,7 +1,7 @@
//! Handles recording commit information to the test run log
use async_trait::async_trait;
-use compactor::{Commit, CommitWrapper};
+use compactor_scheduler::{Commit, CommitWrapper};
use data_types::{CompactionLevel, ParquetFile, ParquetFileId, ParquetFileParams, PartitionId};
use std::{
fmt::{Debug, Display},
diff --git a/compactor_test_utils/src/lib.rs b/compactor_test_utils/src/lib.rs
index c0be0b889d..0001bc8e26 100644
--- a/compactor_test_utils/src/lib.rs
+++ b/compactor_test_utils/src/lib.rs
@@ -121,7 +121,7 @@ impl TestSetupBuilder<false> {
let config = Config {
metric_registry: catalog.metric_registry(),
catalog: catalog.catalog(),
- scheduler_config: SchedulerConfig::default(),
+ scheduler_config: SchedulerConfig::new_local_with_wrapper(Arc::new(commit_wrapper)),
parquet_store_real: catalog.parquet_store.clone(),
parquet_store_scratchpad: ParquetStorage::new(
Arc::new(object_store::memory::InMemory::new()),
@@ -144,7 +144,6 @@ impl TestSetupBuilder<false> {
process_once: true,
simulate_without_object_store: false,
parquet_files_sink_override: None,
- commit_wrapper: Some(Arc::new(commit_wrapper)),
all_errors_are_fatal: true,
max_num_columns_per_table: 200,
max_num_files_per_plan: 200,
diff --git a/ioxd_compactor/src/lib.rs b/ioxd_compactor/src/lib.rs
index 1ed4cdfe3d..513e69c590 100644
--- a/ioxd_compactor/src/lib.rs
+++ b/ioxd_compactor/src/lib.rs
@@ -184,7 +184,6 @@ pub async fn create_compactor_server_type(
process_once: compactor_config.process_once,
simulate_without_object_store: false,
parquet_files_sink_override: None,
- commit_wrapper: None,
all_errors_are_fatal: false,
max_num_columns_per_table: compactor_config.max_num_columns_per_table,
max_num_files_per_plan: compactor_config.max_num_files_per_plan,
diff --git a/ioxd_compactor/src/scheduler_config.rs b/ioxd_compactor/src/scheduler_config.rs
index ca3156d942..3be3eda2e3 100644
--- a/ioxd_compactor/src/scheduler_config.rs
+++ b/ioxd_compactor/src/scheduler_config.rs
@@ -66,6 +66,7 @@ fn convert_shard_config(config: ShardConfigForLocalScheduler) -> Option<ShardCon
pub(crate) fn convert_scheduler_config(config: CompactorSchedulerConfig) -> SchedulerConfig {
match config.compactor_scheduler_type {
CompactorSchedulerType::Local => SchedulerConfig::Local(LocalSchedulerConfig {
+ commit_wrapper: None,
partitions_source_config: convert_partitions_source_config(
config.partition_source_config,
),
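
The net effect of the diff above is that the compactor no longer wires up its own Commit / PartitionDoneSink plumbing; it reports everything through the Scheduler trait. A minimal, hypothetical sketch of how a caller would drive the new job_status API — the type names match the ones introduced in this diff, but report_commit / report_skip and the surrounding setup are illustrative assumptions, not part of the commit:

use std::sync::Arc;

use compactor_scheduler::{
    CommitUpdate, CompactionJob, CompactionJobStatus, CompactionJobStatusResult,
    CompactionJobStatusVariant, Scheduler, SkipReason,
};
use data_types::{CompactionLevel, ParquetFile, ParquetFileParams, PartitionId};

// Report a successful compaction round: delete the input files and register the
// newly written ones. (Hypothetical helper, not part of the commit.)
async fn report_commit(
    scheduler: &Arc<dyn Scheduler>,
    partition_id: PartitionId,
    delete: Vec<ParquetFile>,
    create: Vec<ParquetFileParams>,
) {
    let update = CommitUpdate::new(
        partition_id,
        delete,
        vec![], // no files upgraded in place in this example
        create,
        CompactionLevel::Final,
    );

    match scheduler
        .job_status(CompactionJobStatus {
            job: CompactionJob::new(partition_id),
            status: CompactionJobStatusVariant::Update(update),
        })
        .await
    {
        // the scheduler hands back the catalog-assigned ids of the created files
        Ok(CompactionJobStatusResult::UpdatedParquetFiles(ids)) => {
            assert!(!ids.is_empty());
        }
        other => panic!("unexpected scheduler response: {other:?}"),
    }
}

// Report a fatal error and ask the scheduler to mark the partition as skipped.
async fn report_skip(scheduler: &Arc<dyn Scheduler>, partition_id: PartitionId, msg: String) {
    scheduler
        .job_status(CompactionJobStatus {
            job: CompactionJob::new(partition_id),
            status: CompactionJobStatusVariant::RequestToSkip(SkipReason::CompactionError(msg)),
        })
        .await
        .expect("scheduler should ack the skip request");
}

As the LocalScheduler::job_status implementation above shows, an Update is translated into a catalog commit while a RequestToSkip is forwarded to the scheduler-owned partition_done_sink.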
|
8928c838a8f05db9e4ed5345ddbae5ddcf7c25c5
|
Dom Dwyer
|
2023-08-02 17:23:21
|
schema gossip w/ default partition keys
|
Ensure gossiping namespace & tables with empty partition keys is
correct.
| null |
test: schema gossip w/ default partition keys
Ensure gossiping namespace & tables with empty partition keys is
correct.
|
diff --git a/router/src/gossip/mod.rs b/router/src/gossip/mod.rs
index 9b7bdb91d6..d808ffbd6b 100644
--- a/router/src/gossip/mod.rs
+++ b/router/src/gossip/mod.rs
@@ -73,7 +73,7 @@ mod tests {
use data_types::{
partition_template::{
test_table_partition_override, NamespacePartitionTemplateOverride,
- PARTITION_BY_DAY_PROTO,
+ TablePartitionTemplateOverride, PARTITION_BY_DAY_PROTO,
},
Column, ColumnId, ColumnsByName, NamespaceId, NamespaceName, NamespaceSchema, TableId,
TableSchema,
@@ -191,4 +191,78 @@ mod tests {
// Ensuring the content is identical
assert_eq!(*got, schema);
}
+
+ // As above, but ensuring default partition templates propagate correctly.
+ #[tokio::test]
+ async fn test_integration_default_partition_templates() {
+ // Two adaptors that will plug one "node" into the other.
+ let gossip_a = Arc::new(GossipPipe::default());
+ let gossip_b = Arc::new(GossipPipe::default());
+
+ // Setup a cache for node A and wrap it in the gossip layer.
+ let node_a_cache = Arc::new(MemoryNamespaceCache::default());
+ let dispatcher_a = Arc::new(NamespaceSchemaGossip::new(Arc::clone(&node_a_cache)));
+ let dispatcher_a = GossipMessageDispatcher::new(dispatcher_a, 100);
+ let node_a = SchemaChangeObserver::new(Arc::clone(&node_a_cache), Arc::clone(&gossip_b));
+
+ // Setup a cache for node B.
+
+ let node_b_cache = Arc::new(MemoryNamespaceCache::default());
+ let dispatcher_b = Arc::new(NamespaceSchemaGossip::new(Arc::clone(&node_b_cache)));
+ let dispatcher_b = GossipMessageDispatcher::new(dispatcher_b, 100);
+ let node_b = SchemaChangeObserver::new(Arc::clone(&node_b_cache), Arc::clone(&gossip_b));
+
+ // Connect them together
+ gossip_a.set_dispatcher(dispatcher_a).await;
+ gossip_b.set_dispatcher(dispatcher_b).await;
+
+ // Fill in a table with a column to insert into A
+ let mut tables = BTreeMap::new();
+ tables.insert(
+ "platanos".to_string(),
+ TableSchema {
+ id: TableId::new(4242),
+ partition_template: TablePartitionTemplateOverride::try_new(
+ None,
+ &NamespacePartitionTemplateOverride::default(),
+ )
+ .unwrap(),
+ columns: ColumnsByName::new([Column {
+ id: ColumnId::new(1234),
+ table_id: TableId::new(4242),
+ name: "c1".to_string(),
+ column_type: data_types::ColumnType::U64,
+ }]),
+ },
+ );
+
+ // Wrap the tables into a schema
+ let namespace_name = NamespaceName::try_from("bananas").unwrap();
+ let schema = NamespaceSchema {
+ id: NamespaceId::new(4242),
+ tables,
+ max_columns_per_table: 1,
+ max_tables: 2,
+ retention_period_ns: Some(1234),
+ partition_template: NamespacePartitionTemplateOverride::default(),
+ };
+
+ // Put the new schema into A's cache
+ node_a.put_schema(namespace_name.clone(), schema.clone());
+
+ // And read it back in B
+ let got = async {
+ loop {
+ if let Ok(v) = node_b.get_schema(&namespace_name).await {
+ return v;
+ }
+ tokio::time::sleep(Duration::from_secs(1)).await;
+ }
+ }
+ .with_timeout_panic(Duration::from_secs(5))
+ .await;
+
+ // Ensuring the content is identical
+ assert_eq!(*got, schema);
+ }
}
|
90f8166b4438b229301a2270f958c4d45d370f88
|
Dom Dwyer
|
2023-02-23 18:01:56
|
increase partition bucket range
|
More buckets -> more better.
| null |
refactor(metric): increase partition bucket range
More buckets -> more better.
|
diff --git a/ingester2/src/query/result_instrumentation.rs b/ingester2/src/query/result_instrumentation.rs
index e3e1800531..d968af935d 100644
--- a/ingester2/src/query/result_instrumentation.rs
+++ b/ingester2/src/query/result_instrumentation.rs
@@ -166,7 +166,7 @@ impl<T> QueryResultInstrumentation<T> {
.register_metric_with_options::<U64Histogram, _>(
"ingester_query_result_partition",
"distribution of query result partition count sent to the client",
- || U64HistogramOptions::new([1, 2, 3, 4, 5]),
+ || U64HistogramOptions::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
)
.recorder(&[]);
|
9ab86fa1547c905b4a7984ac9f76adb65c14c682
|
Dom Dwyer
|
2023-01-05 14:06:29
|
drive ingester node (re)-discovery
|
The tonic / tower load-balance implementation discards failed nodes,
even when using a static list - this causes nodes that fail once to
never be retried.
This doesn't happen for the last node for some reason, and leads to all
the load from one router hitting a single ingester instead of load
balancing across all ingesters.
This commit adds a hack to constantly tell the load balancer to probe
all nodes, hopefully causing it to re-discover previously failed
nodes. I don't have the time to do this properly :(
| null |
fix(router2): drive ingester node (re)-discovery
The tonic / tower load-balance implementation discards failed nodes,
even when using a static list - this causes nodes that fail once to
never be retried.
This doesn't happen for the last node for some reason, and leads to all
the load from one router hitting a single ingester instead of load
balancing across all ingesters.
This commit adds a hack to constantly tell the load balancer to probe
all nodes, hopefully causing it to re-discover previously failed
nodes. I don't have the time to do this properly :(
|
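As an illustration of the workaround described above, here is a minimal sketch of the re-insertion pattern, using a plain tokio mpsc channel as a stand-in for the tower discovery channel. The `Change` enum, the endpoint addresses and the 5-second interval are placeholders for this sketch, not the router's actual types or configuration (those are in the diff below, which sends tower::discover::Change::Insert into the sender returned by Channel::balance_channel).

// Sketch only: assumes the `tokio` crate ("rt-multi-thread", "macros", "sync", "time").
use std::time::Duration;
use tokio::sync::mpsc;

// Stand-in for tower::discover::Change; the real code carries (Uri, Endpoint).
#[derive(Debug)]
enum Change {
    Insert(String),
}

#[tokio::main]
async fn main() {
    // Placeholder ingester addresses, not real configuration.
    let endpoints = vec![
        "http://ingester-0:8082".to_string(),
        "http://ingester-1:8082".to_string(),
    ];
    let (tx, mut rx) = mpsc::channel::<Change>(endpoints.len());

    // The workaround: keep re-announcing every endpoint so a balancer that
    // evicted a failed node will eventually probe it again.
    tokio::spawn(async move {
        loop {
            for e in &endpoints {
                if tx.send(Change::Insert(e.clone())).await.is_err() {
                    return; // the receiving side (the balancer) has gone away
                }
            }
            tokio::time::sleep(Duration::from_secs(5)).await;
        }
    });

    // Stand-in for the balancer draining discovery changes.
    for _ in 0..4 {
        if let Some(change) = rx.recv().await {
            println!("(re)discovered: {change:?}");
        }
    }
}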
diff --git a/Cargo.lock b/Cargo.lock
index ba6d68dbb0..0880f7098d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4626,6 +4626,7 @@ dependencies = [
"tokio",
"tokio-stream",
"tonic",
+ "tower",
"trace",
"workspace-hack",
"write_buffer",
diff --git a/router/Cargo.toml b/router/Cargo.toml
index 29caef350a..e02c01a15b 100644
--- a/router/Cargo.toml
+++ b/router/Cargo.toml
@@ -43,6 +43,7 @@ trace = { path = "../trace/" }
workspace-hack = { path = "../workspace-hack"}
write_buffer = { path = "../write_buffer" }
write_summary = { path = "../write_summary" }
+tower = { version = "0.4.13", features = ["balance"] }
[dev-dependencies]
assert_matches = "1.5"
diff --git a/router/src/dml_handlers/rpc_write.rs b/router/src/dml_handlers/rpc_write.rs
index 2e5f4d0088..27add424dc 100644
--- a/router/src/dml_handlers/rpc_write.rs
+++ b/router/src/dml_handlers/rpc_write.rs
@@ -25,9 +25,36 @@ pub fn build_ingester_connection<T>(addrs: impl Iterator<Item = T>) -> WriteServ
where
T: AsRef<str>,
{
- WriteServiceClient::new(Channel::balance_list(
- addrs.map(|s| Endpoint::from_str(s.as_ref()).expect("invalid ingester address")),
- ))
+ let endpoints = addrs
+ .map(|s| Endpoint::from_str(s.as_ref()).expect("invalid ingester address"))
+ .collect::<Vec<_>>();
+
+ let (channel, tx) = Channel::balance_channel(endpoints.len());
+
+ // BUG: tower balance removes failed nodes from the pool, except the last
+ // node in the pool, which leads to a router talking to one ingester.
+ //
+ // As an absolute hack, keep inserting the nodes into the pool to drive
+ // discovery after they have failed.
+ //
+ // https://github.com/influxdata/influxdb_iox/issues/6508
+ //
+ tokio::spawn(async move {
+ loop {
+ for e in &endpoints {
+ tx.send(tower::discover::Change::Insert(
+ e.uri().to_owned(),
+ e.clone(),
+ ))
+ .await
+ .expect("no grpc balance receiver");
+ }
+
+ tokio::time::sleep(Duration::from_secs(5)).await;
+ }
+ });
+
+ WriteServiceClient::new(channel)
}
/// The bound on RPC request duration.
|
b87d572e4269bc3cf3ceeca8468d0a353322c2f6
|
Dom Dwyer
|
2022-12-19 11:25:20
|
single PartitionResponse constructor
|
Removes the PartitionResponse::new_no_batches() constructor, instead
using an Option-wrapped data argument. Previously that would have been
confusing (many Options in the constructor signature), but now there's
only one!
| null |
refactor: single PartitionResponse constructor
Removes the PartitionResponse::new_no_batches() constructor, instead
using an Option-wrapped data argument. Previously that would have been
confusing (many Options in the constructor signature), but now there's
only one!
|
diff --git a/ingester2/src/buffer_tree/table.rs b/ingester2/src/buffer_tree/table.rs
index d691e93e0c..0ed2db7fae 100644
--- a/ingester2/src/buffer_tree/table.rs
+++ b/ingester2/src/buffer_tree/table.rs
@@ -294,9 +294,9 @@ where
let data = Box::pin(MemoryStream::new(
data.project_selection(selection).into_iter().collect(),
));
- PartitionResponse::new(data, id, completed_persistence_count)
+ PartitionResponse::new(Some(data), id, completed_persistence_count)
}
- None => PartitionResponse::new_no_batches(id, completed_persistence_count),
+ None => PartitionResponse::new(None, id, completed_persistence_count),
};
span.ok("read partition data");
diff --git a/ingester2/src/query/partition_response.rs b/ingester2/src/query/partition_response.rs
index 5221199666..43c5802749 100644
--- a/ingester2/src/query/partition_response.rs
+++ b/ingester2/src/query/partition_response.rs
@@ -38,20 +38,12 @@ impl std::fmt::Debug for PartitionResponse {
impl PartitionResponse {
pub(crate) fn new(
- batches: SendableRecordBatchStream,
+ data: Option<SendableRecordBatchStream>,
id: PartitionId,
completed_persistence_count: u64,
) -> Self {
Self {
- batches: Some(batches),
- id,
- completed_persistence_count,
- }
- }
-
- pub(crate) fn new_no_batches(id: PartitionId, completed_persistence_count: u64) -> Self {
- Self {
- batches: None,
+ batches: data,
id,
completed_persistence_count,
}
|
3e4db81bc6c3b6ee4b0baeb907c2523e84105269
|
Marco Neumann
|
2022-10-24 18:12:42
|
make `SchemaBuilder::field` fallible
|
It would be nice if the IOx data type would not be optional and this is
a prep clean-up to achieve that.
| null |
refactor: make `SchemaBuilder::field` fallible
It would be nice if the IOx data type would not be optional and this is
a prep clean-up to achieve that.
|
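A short usage sketch of the now-fallible builder, assuming the `schema::builder::SchemaBuilder` and arrow `DataType` import paths used by the tests in the diff below; the column names are illustrative. Arrow types without an InfluxDB representation (the Decimal128 case removed from the tests) now produce an error at the `.field()` call instead of being stored without an IOx data type.

// Sketch only: assumes the workspace's `schema` and `arrow` crates.
use arrow::datatypes::DataType;
use schema::builder::SchemaBuilder;

fn main() {
    // Arrow types that map onto an InfluxDB field type still work, but every
    // `.field()` call now returns a Result that has to be handled.
    let schema = SchemaBuilder::new()
        .field("field_int", DataType::Int64)
        .unwrap()
        .timestamp()
        .build()
        .unwrap();
    assert_eq!(schema.len(), 2);

    // Arrow types with no InfluxDB representation are rejected up front.
    let mut builder = SchemaBuilder::new();
    assert!(builder.field("not_lp", DataType::Decimal128(10, 0)).is_err());
}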
diff --git a/iox_query/src/provider.rs b/iox_query/src/provider.rs
index e97eaa3d87..112e34b4f0 100644
--- a/iox_query/src/provider.rs
+++ b/iox_query/src/provider.rs
@@ -2006,6 +2006,7 @@ mod test {
// request just the field and timestamp
let schema = SchemaBuilder::new()
.field("field_int", DataType::Int64)
+ .unwrap()
.timestamp()
.build()
.unwrap();
@@ -2101,7 +2102,9 @@ mod test {
// request just the fields
let schema = SchemaBuilder::new()
.field("field_int", DataType::Int64)
+ .unwrap()
.field("other_field_int", DataType::Int64)
+ .unwrap()
.build()
.unwrap();
@@ -2416,6 +2419,7 @@ mod test {
// request just the field and timestamp
let schema = SchemaBuilder::new()
.field("field_int", DataType::Int64)
+ .unwrap()
.timestamp()
.build()
.unwrap();
diff --git a/iox_query/src/test.rs b/iox_query/src/test.rs
index e7a0503f1c..497d932e41 100644
--- a/iox_query/src/test.rs
+++ b/iox_query/src/test.rs
@@ -267,6 +267,7 @@ macro_rules! impl_with_column {
let new_column_schema = SchemaBuilder::new()
.field(&column_name, DataType::$DATA_TYPE)
+ .unwrap()
.build()
.unwrap();
self.add_schema_to_table(new_column_schema, true, None)
@@ -282,6 +283,7 @@ macro_rules! impl_with_column_no_stats {
let new_column_schema = SchemaBuilder::new()
.field(&column_name, DataType::$DATA_TYPE)
+ .unwrap()
.build()
.unwrap();
@@ -303,6 +305,7 @@ macro_rules! impl_with_column_with_stats {
let new_column_schema = SchemaBuilder::new()
.field(&column_name, DataType::$DATA_TYPE)
+ .unwrap()
.build()
.unwrap();
@@ -525,6 +528,7 @@ impl TestChunk {
// merge it in to any existing schema
let new_column_schema = SchemaBuilder::new()
.field(&column_name, DataType::Utf8)
+ .unwrap()
.build()
.unwrap();
diff --git a/iox_query/src/util.rs b/iox_query/src/util.rs
index d21dc824a8..f5e268814b 100644
--- a/iox_query/src/util.rs
+++ b/iox_query/src/util.rs
@@ -288,10 +288,15 @@ mod tests {
let schema = SchemaBuilder::new()
.tag("tag")
.field("str", DataType::Utf8)
+ .unwrap()
.field("int", DataType::Int64)
+ .unwrap()
.field("uint", DataType::UInt64)
+ .unwrap()
.field("float", DataType::Float64)
+ .unwrap()
.field("bool", DataType::Boolean)
+ .unwrap()
.build()
.unwrap();
diff --git a/predicate/src/lib.rs b/predicate/src/lib.rs
index 7f0b9782ac..476169bae2 100644
--- a/predicate/src/lib.rs
+++ b/predicate/src/lib.rs
@@ -789,7 +789,9 @@ mod tests {
let schema = SchemaBuilder::new()
.field("foo", ArrowDataType::Int64)
+ .unwrap()
.field("bar", ArrowDataType::Int64)
+ .unwrap()
.timestamp()
.build()
.unwrap();
@@ -896,6 +898,7 @@ mod tests {
let schema = SchemaBuilder::new()
.field("foo", ArrowDataType::Int64)
+ .unwrap()
.timestamp()
.build()
.unwrap();
diff --git a/predicate/src/rpc_predicate.rs b/predicate/src/rpc_predicate.rs
index 89fb1803c9..4b4257e799 100644
--- a/predicate/src/rpc_predicate.rs
+++ b/predicate/src/rpc_predicate.rs
@@ -454,7 +454,9 @@ mod tests {
.tag("t1")
.tag("t2")
.field("f1", DataType::Int64)
+ .unwrap()
.field("f2", DataType::Int64)
+ .unwrap()
.build()
.unwrap();
diff --git a/predicate/src/rpc_predicate/column_rewrite.rs b/predicate/src/rpc_predicate/column_rewrite.rs
index a6c8d55f3f..cc925c5aae 100644
--- a/predicate/src/rpc_predicate/column_rewrite.rs
+++ b/predicate/src/rpc_predicate/column_rewrite.rs
@@ -91,6 +91,7 @@ mod tests {
let schema = SchemaBuilder::new()
.tag("t1")
.field("f1", DataType::Int64)
+ .unwrap()
.build()
.unwrap();
diff --git a/predicate/src/rpc_predicate/field_rewrite.rs b/predicate/src/rpc_predicate/field_rewrite.rs
index 8ccd354d19..af3baf2f87 100644
--- a/predicate/src/rpc_predicate/field_rewrite.rs
+++ b/predicate/src/rpc_predicate/field_rewrite.rs
@@ -505,9 +505,13 @@ mod tests {
.tag("foo")
.tag("bar")
.field("f1", DataType::Float64)
+ .unwrap()
.field("f2", DataType::Float64)
+ .unwrap()
.field("f3", DataType::Float64)
+ .unwrap()
.field("f4", DataType::Float64)
+ .unwrap()
.build()
.map(Arc::new)
.unwrap()
diff --git a/querier/src/cache/namespace.rs b/querier/src/cache/namespace.rs
index d9b55cb05b..a01de4599b 100644
--- a/querier/src/cache/namespace.rs
+++ b/querier/src/cache/namespace.rs
@@ -327,6 +327,7 @@ mod tests {
schema: Arc::new(
SchemaBuilder::new()
.field("col1", DataType::Int64)
+ .unwrap()
.tag("col2")
.timestamp()
.build()
@@ -346,6 +347,7 @@ mod tests {
schema: Arc::new(
SchemaBuilder::new()
.field("col1", DataType::Float64)
+ .unwrap()
.timestamp()
.build()
.unwrap(),
diff --git a/querier/src/chunk/mod.rs b/querier/src/chunk/mod.rs
index 03b9b8a3d7..bc053e1e1f 100644
--- a/querier/src/chunk/mod.rs
+++ b/querier/src/chunk/mod.rs
@@ -510,6 +510,7 @@ pub mod tests {
fn assert_schema(chunk: &QuerierChunk) {
let expected_schema = SchemaBuilder::new()
.field("field_int", DataType::Int64)
+ .unwrap()
.tag("tag1")
.timestamp()
.build()
diff --git a/querier/src/ingester/mod.rs b/querier/src/ingester/mod.rs
index bf959d0e14..e09e024cf5 100644
--- a/querier/src/ingester/mod.rs
+++ b/querier/src/ingester/mod.rs
@@ -1955,6 +1955,7 @@ mod tests {
let expected_schema = Arc::new(
SchemaBuilder::new()
.field("b", DataType::Boolean)
+ .unwrap()
.timestamp()
.build()
.unwrap(),
diff --git a/query_tests/src/table_schema.rs b/query_tests/src/table_schema.rs
index 359ba1ce49..2f8b438b2e 100644
--- a/query_tests/src/table_schema.rs
+++ b/query_tests/src/table_schema.rs
@@ -85,6 +85,7 @@ async fn list_schema_cpu_all() {
.tag("region")
.timestamp()
.field("user", DataType::Float64)
+ .unwrap()
.build()
.unwrap();
@@ -107,6 +108,7 @@ async fn list_schema_cpu_all_set_sort_key() {
.tag("region")
.timestamp()
.field("user", DataType::Float64)
+ .unwrap()
.build()
.unwrap();
@@ -127,6 +129,7 @@ async fn list_schema_disk_all() {
// we expect columns to come out in lexicographic order by name
let expected_schema = SchemaBuilder::new()
.field("bytes", DataType::Int64)
+ .unwrap()
.tag("region")
.timestamp()
.build()
@@ -146,6 +149,7 @@ async fn list_schema_disk_all() {
async fn list_schema_cpu_selection() {
let expected_schema = SchemaBuilder::new()
.field("user", DataType::Float64)
+ .unwrap()
.tag("region")
.build()
.unwrap();
@@ -162,6 +166,7 @@ async fn list_schema_disk_selection() {
let expected_schema = SchemaBuilder::new()
.timestamp()
.field("bytes", DataType::Int64)
+ .unwrap()
.build()
.unwrap();
@@ -176,6 +181,7 @@ async fn list_schema_location_all() {
// we expect columns to come out in lexicographic order by name
let expected_schema = SchemaBuilder::new()
.field("count", DataType::UInt64)
+ .unwrap()
.timestamp()
.tag("town")
.build()
diff --git a/schema/src/builder.rs b/schema/src/builder.rs
index 89cf0a402a..2e5a8aaf29 100644
--- a/schema/src/builder.rs
+++ b/schema/src/builder.rs
@@ -70,34 +70,34 @@ impl SchemaBuilder {
pub fn influx_column(&mut self, column_name: &str, column_type: InfluxColumnType) -> &mut Self {
match column_type {
InfluxColumnType::Tag => self.tag(column_name),
- InfluxColumnType::Field(influx_field_type) => {
- self.field(column_name, influx_field_type.into())
- }
+ InfluxColumnType::Field(influx_field_type) => self
+ .field(column_name, influx_field_type.into())
+ .expect("just converted this from a valid type"),
InfluxColumnType::Timestamp => self.timestamp(),
}
}
/// Add a new nullable field column with the specified Arrow datatype.
- pub fn field(&mut self, column_name: &str, arrow_type: ArrowDataType) -> &mut Self {
- let influxdb_column_type = arrow_type
- .clone()
- .try_into()
- .map(InfluxColumnType::Field)
- .ok();
-
- self.add_column(column_name, true, influxdb_column_type, arrow_type)
+ pub fn field(
+ &mut self,
+ column_name: &str,
+ arrow_type: ArrowDataType,
+ ) -> Result<&mut Self, &'static str> {
+ let influxdb_column_type = arrow_type.clone().try_into().map(InfluxColumnType::Field)?;
+
+ Ok(self.add_column(column_name, true, Some(influxdb_column_type), arrow_type))
}
/// Add a new field column with the specified Arrow datatype that can not be
/// null
- pub fn non_null_field(&mut self, column_name: &str, arrow_type: ArrowDataType) -> &mut Self {
- let influxdb_column_type = arrow_type
- .clone()
- .try_into()
- .map(InfluxColumnType::Field)
- .ok();
-
- self.add_column(column_name, false, influxdb_column_type, arrow_type)
+ pub fn non_null_field(
+ &mut self,
+ column_name: &str,
+ arrow_type: ArrowDataType,
+ ) -> Result<&mut Self, &'static str> {
+ let influxdb_column_type = arrow_type.clone().try_into().map(InfluxColumnType::Field)?;
+
+ Ok(self.add_column(column_name, false, Some(influxdb_column_type), arrow_type))
}
/// Add the InfluxDB data model timestamp column
@@ -241,8 +241,9 @@ mod test {
fn test_builder_field() {
let s = SchemaBuilder::new()
.field("the_influx_field", ArrowDataType::Float64)
- // can't represent with lp
- .field("the_no_influx_field", ArrowDataType::Decimal128(10, 0))
+ .unwrap()
+ .field("the_other_influx_field", ArrowDataType::Int64)
+ .unwrap()
.build()
.unwrap();
@@ -253,10 +254,10 @@ mod test {
assert_eq!(influxdb_column_type, Some(Field(Float)));
let (influxdb_column_type, field) = s.field(1);
- assert_eq!(field.name(), "the_no_influx_field");
- assert_eq!(field.data_type(), &ArrowDataType::Decimal128(10, 0));
+ assert_eq!(field.name(), "the_other_influx_field");
+ assert_eq!(field.data_type(), &ArrowDataType::Int64);
assert!(field.is_nullable());
- assert_eq!(influxdb_column_type, None);
+ assert_eq!(influxdb_column_type, Some(Field(Integer)));
assert_eq!(s.len(), 2);
}
@@ -281,8 +282,9 @@ mod test {
fn test_builder_non_field() {
let s = SchemaBuilder::new()
.non_null_field("the_influx_field", ArrowDataType::Float64)
- // can't represent with lp
- .non_null_field("the_no_influx_field", ArrowDataType::Decimal128(10, 0))
+ .unwrap()
+ .non_null_field("the_other_influx_field", ArrowDataType::Int64)
+ .unwrap()
.build()
.unwrap();
@@ -293,10 +295,10 @@ mod test {
assert_eq!(influxdb_column_type, Some(Field(Float)));
let (influxdb_column_type, field) = s.field(1);
- assert_eq!(field.name(), "the_no_influx_field");
- assert_eq!(field.data_type(), &ArrowDataType::Decimal128(10, 0));
+ assert_eq!(field.name(), "the_other_influx_field");
+ assert_eq!(field.data_type(), &ArrowDataType::Int64);
assert!(!field.is_nullable());
- assert_eq!(influxdb_column_type, None);
+ assert_eq!(influxdb_column_type, Some(Field(Integer)));
assert_eq!(s.len(), 2);
}
diff --git a/schema/src/lib.rs b/schema/src/lib.rs
index 97a1a38541..739499090e 100644
--- a/schema/src/lib.rs
+++ b/schema/src/lib.rs
@@ -1026,8 +1026,11 @@ mod test {
fn test_sort_fields_by_name_already_sorted() {
let schema = SchemaBuilder::new()
.field("field_a", ArrowDataType::Int64)
+ .unwrap()
.field("field_b", ArrowDataType::Int64)
+ .unwrap()
.field("field_c", ArrowDataType::Int64)
+ .unwrap()
.build()
.unwrap();
@@ -1044,8 +1047,11 @@ mod test {
fn test_sort_fields_by_name() {
let schema = SchemaBuilder::new()
.field("field_b", ArrowDataType::Int64)
+ .unwrap()
.field("field_a", ArrowDataType::Int64)
+ .unwrap()
.field("field_c", ArrowDataType::Int64)
+ .unwrap()
.build()
.unwrap();
@@ -1053,8 +1059,11 @@ mod test {
let expected_schema = SchemaBuilder::new()
.field("field_a", ArrowDataType::Int64)
+ .unwrap()
.field("field_b", ArrowDataType::Int64)
+ .unwrap()
.field("field_c", ArrowDataType::Int64)
+ .unwrap()
.build()
.unwrap();
diff --git a/schema/src/merge.rs b/schema/src/merge.rs
index a50e7290d2..9dafc1846b 100644
--- a/schema/src/merge.rs
+++ b/schema/src/merge.rs
@@ -418,16 +418,16 @@ mod tests {
#[test]
fn test_merge_incompatible_data_types() {
// same field name with different type
- let schema1 = SchemaBuilder::new()
- .field("the_field", ArrowDataType::Int16)
- .build()
- .unwrap();
+ let schema1 = Schema::try_from_arrow(Arc::new(arrow::datatypes::Schema::new(vec![
+ arrow::datatypes::Field::new("the_field", ArrowDataType::Int16, true),
+ ])))
+ .unwrap();
// same field name with different type
- let schema2 = SchemaBuilder::new()
- .field("the_field", ArrowDataType::Int8)
- .build()
- .unwrap();
+ let schema2 = Schema::try_from_arrow(Arc::new(arrow::datatypes::Schema::new(vec![
+ arrow::datatypes::Field::new("the_field", ArrowDataType::Int8, true),
+ ])))
+ .unwrap();
let merged_schema_error = SchemaMerger::new()
.merge(&schema1)
@@ -461,12 +461,14 @@ mod tests {
fn test_merge_incompatible_schema_nullability() {
let schema1 = SchemaBuilder::new()
.non_null_field("int_field", ArrowDataType::Int64)
+ .unwrap()
.build()
.unwrap();
// same field name with different nullability
let schema2 = SchemaBuilder::new()
.field("int_field", ArrowDataType::Int64)
+ .unwrap()
.build()
.unwrap();
diff --git a/service_grpc_influxrpc/src/expr.rs b/service_grpc_influxrpc/src/expr.rs
index bb448bbd97..94dbde3429 100644
--- a/service_grpc_influxrpc/src/expr.rs
+++ b/service_grpc_influxrpc/src/expr.rs
@@ -908,7 +908,9 @@ mod tests {
.tag("t2")
.tag("host")
.field("foo", DataType::Int64)
+ .unwrap()
.field("bar", DataType::Int64)
+ .unwrap()
.build()
.unwrap();
@@ -918,6 +920,7 @@ mod tests {
let schema = SchemaBuilder::new()
.tag("t3")
.field("baz", DataType::Int64)
+ .unwrap()
.build()
.unwrap();
|
e822374270c41335bd0cf1b3f5b9acec5c9b9454
|
Marco Neumann
|
2023-07-24 15:28:04
|
build annotated OCI images (#8301)
|
* refactor: isolate docker build to script
* chore: add labels to docker image
* chore: export image as OCI
* chore: print image digest
* fix: convert to OCI BEFORE calculating digest
* fix: use digest of uploaded image, not of the local archive
---------
|
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
chore: build annotated OCI images (#8301)
* refactor: isolate docker build to script
* chore: add labels to docker image
* chore: export image as OCI
* chore: print image digest
* fix: convert to OCI BEFORE calculating digest
* fix: use digest of uploaded image, not of the local archive
---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
|
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 5a2a6c71ef..4a556a986d 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -387,25 +387,15 @@ jobs:
command: |
COMMIT_SHA="$(git rev-parse HEAD)"
- RUST_VERSION="$(sed -E -ne 's/channel = "(.*)"/\1/p' rust-toolchain.toml)"
+ .circleci/docker_build_release.bash \
+ "influxdb_iox" \
+ "aws,gcp,azure,jemalloc_replacing_malloc,tokio_console,pprof" \
+ "quay.io/influxdb/iox:$COMMIT_SHA"
- docker buildx build \
- --build-arg CARGO_INCREMENTAL="no" \
- --build-arg CARGO_NET_GIT_FETCH_WITH_CLI="true" \
- --build-arg FEATURES="aws,gcp,azure,jemalloc_replacing_malloc,tokio_console,pprof" \
- --build-arg RUST_VERSION="$RUST_VERSION" \
- --progress plain \
- --tag quay.io/influxdb/iox:"$COMMIT_SHA" \
- .
- docker buildx build \
- --build-arg CARGO_INCREMENTAL="no" \
- --build-arg CARGO_NET_GIT_FETCH_WITH_CLI="true" \
- --build-arg FEATURES="" \
- --build-arg PACKAGE="iox_data_generator" \
- --build-arg RUST_VERSION="$RUST_VERSION" \
- --progress plain \
- --tag quay.io/influxdb/iox_data_generator:"$COMMIT_SHA" \
- .
+ .circleci/docker_build_release.bash \
+ "iox_data_generator" \
+ "" \
+ "quay.io/influxdb/iox_data_generator:$COMMIT_SHA"
docker run -it --rm quay.io/influxdb/iox:$COMMIT_SHA debug print-cpu
@@ -452,14 +442,25 @@ jobs:
for image in "${images[@]}"; do
echo "Image: $image"
+ oci_path="oci-archive:///tmp/images/$image.oci.tar"
+
+ # convert the gzipped docker image into OCI
gzip -d "/tmp/images/$image.tar.gz"
+ skopeo copy --format oci --quiet "docker-archive:///tmp/images/$image.tar" "$oci_path"
for registry in "${registries[@]}"; do
echo " Registry: $registry"
+ # upload all tags
+ # Note: Uploading the 2nd tag for the same image (to the same registry) is very cheap since all layers
+ # exist already (from the previous tag).
for tag in "${tags[@]}"; do
echo " Upload: tag=$tag"
- skopeo copy "docker-archive:///tmp/images/$image.tar" "docker://$registry/$image:$tag" --quiet
+ docker_url="docker://$registry/$image:$tag"
+ skopeo copy --quiet "$oci_path" "$docker_url"
+
+ # print out digest AFTER upload, see https://github.com/containers/skopeo/issues/469
+ echo " Digest: $(skopeo inspect "$docker_url" | jq ".Digest")"
done
done
done
diff --git a/.circleci/docker_build_release.bash b/.circleci/docker_build_release.bash
new file mode 100755
index 0000000000..70c5e32fe8
--- /dev/null
+++ b/.circleci/docker_build_release.bash
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+readonly PACKAGE="$1"
+readonly FEATURES="$2"
+readonly TAG="$3"
+
+RUST_VERSION="$(sed -E -ne 's/channel = "(.*)"/\1/p' rust-toolchain.toml)"
+COMMIT_SHA="$(git rev-parse HEAD)"
+COMMIT_TS="$(env TZ=UTC0 git show --quiet --date='format-local:%Y-%m-%dT%H:%M:%SZ' --format="%cd" HEAD)"
+NOW="$(date --utc --iso-8601=seconds)"
+REPO_URL="https://github.com/influxdata/influxdb_iox"
+
+exec docker buildx build \
+ --build-arg CARGO_INCREMENTAL="no" \
+ --build-arg CARGO_NET_GIT_FETCH_WITH_CLI="true" \
+ --build-arg FEATURES="$FEATURES" \
+ --build-arg RUST_VERSION="$RUST_VERSION" \
+ --build-arg PACKAGE="$PACKAGE" \
+ --label org.opencontainers.image.created="$NOW" \
+ --label org.opencontainers.image.url="$REPO_URL" \
+ --label org.opencontainers.image.revision="$COMMIT_SHA" \
+ --label org.opencontainers.image.vendor="InfluxData Inc." \
+ --label org.opencontainers.image.title="InfluxDB IOx, '$PACKAGE'" \
+ --label org.opencontainers.image.description="InfluxDB IOx production image for package '$PACKAGE'" \
+ --label com.influxdata.image.commit-date="$COMMIT_TS" \
+ --label com.influxdata.image.package="$PACKAGE" \
+ --progress plain \
+ --tag "$TAG" \
+ .
|
2acbaefa183801c5b45eed08a679820897df8f9b
|
Dom Dwyer
|
2023-06-15 11:26:55
|
correct dedupe of strftime values
|
This fixes the root cause of influxdata/idpe#17765; the code was
performing a "is this the last value you saw" check by comparing it to
the last generated partition key which is not the same thing - a cache
hit would not generate a new key, and therefore would not return the
correct answer after.
The end result is that a subset of writes with a problematic sequence
of timestamps would be assigned the wrong partition key. Because all
users are using the default YYYY-MM-DD partitioning scheme, the impact
was relatively low, as most of the time that partition key had the same
YYYY-MM-DD representation as the last.
| null |
fix: correct dedupe of strftime values
This fixes the root cause of influxdata/idpe#17765; the code was
performing a "is this the last value you saw" check by comparing it to
the last generated partition key which is not the same thing - a cache
hit would not generate a new key, and therefore would not return the
correct answer after.
The end result is that a subset of writes with a problematic sequence
of timestamps would be assigned the wrong partition key. Because all
users are using the default YYYY-MM-DD partitioning scheme, the impact
was relatively low, as most of the time that partition key had the same
YYYY-MM-DD representation as the last.
|
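Before the diff, a minimal self-contained illustration of the bug class (a toy formatter, not mutable_batch's StrftimeFormatter): after a cache hit nothing new is rendered, so an "is this the last value you saw" check has to track the last observed input rather than the last rendered cache entry. The struct, field names and key format here are invented for the example.

// Sketch only: toy stand-in demonstrating "last observed" vs "last rendered".
struct Formatter {
    cache: Vec<(i64, String)>, // previously rendered timestamps and their keys
    last_seen: Option<i64>,    // last observed input - the approach taken by this fix
}

impl Formatter {
    fn render(&mut self, ts: i64) -> String {
        self.last_seen = Some(ts);
        if let Some((_, s)) = self.cache.iter().find(|(t, _)| *t == ts) {
            return s.clone(); // cache hit: nothing new is rendered or recorded
        }
        let s = format!("key-{ts}");
        self.cache.push((ts, s.clone()));
        s
    }

    /// The buggy variant compared `ts` against the most recently *rendered*
    /// cache entry, which goes stale as soon as a cache hit occurs.
    fn equals_last(&self, ts: i64) -> bool {
        self.last_seen == Some(ts)
    }
}

fn main() {
    let mut f = Formatter { cache: Vec::new(), last_seen: None };
    f.render(1686756903736785920); // rendered and cached
    f.render(42);                  // rendered and cached
    f.render(1686756903736785920); // cache hit: last *rendered* entry is still 42
    assert!(!f.equals_last(42));   // a check against "last rendered" would wrongly say true
    assert!(f.equals_last(1686756903736785920));
}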
diff --git a/mutable_batch/src/payload/partition.rs b/mutable_batch/src/payload/partition.rs
index d30adea4c7..a37f385a97 100644
--- a/mutable_batch/src/payload/partition.rs
+++ b/mutable_batch/src/payload/partition.rs
@@ -347,6 +347,39 @@ mod tests {
StdRng::seed_from_u64(seed)
}
+ /// Reproducer for https://github.com/influxdata/idpe/issues/17765
+ #[test]
+ fn test_equals_last() {
+ let ts = [
+ 1686756903736785920, // last_eq=false, render, set last_ptr
+ 42, // last_eq=false, render, set last_ptr
+ 1686756903736785920, // last_eq=false, re-use, don't change last_ptr
+ 1686756903736785920, // last_eq=false, re-use, don't change last_ptr
+ 42, // last_eq=true (wrong), re-use
+ ];
+
+ let mut batch = MutableBatch::new();
+ let mut writer = Writer::new(&mut batch, ts.len());
+
+ writer.write_time("time", ts.into_iter()).unwrap();
+ writer.commit();
+
+ let keys =
+ generate_denormalised_keys(&batch, TablePartitionTemplateOverride::default().parts())
+ .unwrap();
+
+ assert_eq!(
+ keys,
+ &[
+ "2023-06-14",
+ "1970-01-01",
+ "2023-06-14",
+ "2023-06-14",
+ "1970-01-01",
+ ]
+ );
+ }
+
/// Generates a vector of partition key strings, or an error.
///
/// This function normalises the de-duplicated output of
diff --git a/mutable_batch/src/payload/partition/strftime.rs b/mutable_batch/src/payload/partition/strftime.rs
index 73c788256e..0aa7f9e4d4 100644
--- a/mutable_batch/src/payload/partition/strftime.rs
+++ b/mutable_batch/src/payload/partition/strftime.rs
@@ -74,11 +74,6 @@ where
}
None
}
-
- /// Return the last wrote value, if any.
- fn last(&self) -> Option<&'_ T> {
- self.buf[self.last_idx].as_ref()
- }
}
/// A strftime-like formatter of epoch timestamps with nanosecond granularity.
@@ -147,6 +142,14 @@ pub(super) struct StrftimeFormatter<'a> {
/// A set of 5 most recently added timestamps, and the formatted string they
/// map to.
values: RingBuffer<5, (i64, String)>,
+
+ /// The last observed timestamp.
+ ///
+ /// This value changes each time a timestamp is returned to the user, either
+ /// from the cache of pre-generated strings, or by generating a new one and
+ /// MUST always track the last timestamp given to
+ /// [`StrftimeFormatter::render()`].
+ last_ts: Option<i64>,
}
impl<'a> StrftimeFormatter<'a> {
@@ -170,6 +173,7 @@ impl<'a> StrftimeFormatter<'a> {
format: StrftimeItems::new(format),
is_ymd_format: is_default_format,
values: RingBuffer::default(),
+ last_ts: None,
}
}
@@ -182,6 +186,9 @@ impl<'a> StrftimeFormatter<'a> {
// Optionally apply the default format reduction optimisation.
let timestamp = self.maybe_reduce(timestamp);
+ // Retain this timestamp as the last observed timestamp.
+ self.last_ts = Some(timestamp);
+
// Check if this timestamp has already been rendered.
if let Some(v) = self.values.find(|(t, _v)| *t == timestamp) {
// It has! Re-use the existing formatted string.
@@ -239,10 +246,7 @@ impl<'a> StrftimeFormatter<'a> {
// Optionally apply the default format reduction optimisation.
let timestamp = self.maybe_reduce(timestamp);
- self.values
- .last()
- .map(|(ts, _)| *ts == timestamp)
- .unwrap_or_default()
+ self.last_ts.map(|v| v == timestamp).unwrap_or_default()
}
}
@@ -302,17 +306,6 @@ mod tests {
assert_eq!(fmt.values.last_idx, 1);
}
- #[test]
- fn test_ring_buffer_equals_last() {
- let mut b = RingBuffer::<4, _>::default();
-
- assert!(b.find(|v| *v == 42).is_none());
-
- *b.next_slot() = 42;
-
- assert_eq!(b.last(), Some(&42));
- }
-
const FORMATTER_SPEC_PARTS: &[&str] = &[
"%Y", "%m", "%d", "%H", "%m", "%.9f", "%r", "%+", "%t", "%n", "%A", "%c",
];
|